xref: /linux/net/bluetooth/l2cap_core.c (revision ff5599816711d2e67da2d7561fd36ac48debd433)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
41 #include <net/bluetooth/amp.h>
42 
43 bool disable_ertm;
44 
45 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
46 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
47 
48 static LIST_HEAD(chan_list);
49 static DEFINE_RWLOCK(chan_list_lock);
50 
51 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
52 				       u8 code, u8 ident, u16 dlen, void *data);
53 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
54 			   void *data);
55 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
56 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
57 
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 		     struct sk_buff_head *skbs, u8 event);
60 
61 /* ---- L2CAP channels ---- */
62 
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
64 						   u16 cid)
65 {
66 	struct l2cap_chan *c;
67 
68 	list_for_each_entry(c, &conn->chan_l, list) {
69 		if (c->dcid == cid)
70 			return c;
71 	}
72 	return NULL;
73 }
74 
75 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
76 						   u16 cid)
77 {
78 	struct l2cap_chan *c;
79 
80 	list_for_each_entry(c, &conn->chan_l, list) {
81 		if (c->scid == cid)
82 			return c;
83 	}
84 	return NULL;
85 }
86 
/* Find channel with given SCID.
 * Returns the channel locked, or NULL if no channel matches. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	/* Take the channel lock while still holding chan_lock so the
	 * caller receives an already-locked channel.
	 */
	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
102 
/* Find channel with given DCID.
 * Returns the channel locked, or NULL if no channel matches.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	/* Take the channel lock while still holding chan_lock so the
	 * caller receives an already-locked channel.
	 */
	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
119 
120 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
121 						    u8 ident)
122 {
123 	struct l2cap_chan *c;
124 
125 	list_for_each_entry(c, &conn->chan_l, list) {
126 		if (c->ident == ident)
127 			return c;
128 	}
129 	return NULL;
130 }
131 
/* Find channel with given signalling identifier.
 * Returns the channel locked, or NULL if no channel matches.
 */
static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						  u8 ident)
{
	struct l2cap_chan *c;

	/* Take the channel lock while still holding chan_lock so the
	 * caller receives an already-locked channel.
	 */
	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
145 
/* Look up a channel in the global list by bound source PSM and source
 * address. Caller must hold chan_list_lock.
 */
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
			return c;
	}
	return NULL;
}
156 
/* Bind @chan to @psm on source address @src, or pick a free dynamic PSM
 * when @psm is 0.
 *
 * Returns 0 on success, -EADDRINUSE when the requested PSM is already
 * bound on @src, or -EINVAL when no free dynamic PSM is found.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		/* Scan the odd values in 0x1001-0x10ff for a PSM not
		 * yet bound on this source address.
		 */
		err = -EINVAL;
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm   = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
189 
/* Assign a fixed source CID to @chan. Always returns 0. */
int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
{
	write_lock(&chan_list_lock);

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
200 
201 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
202 {
203 	u16 cid = L2CAP_CID_DYN_START;
204 
205 	for (; cid < L2CAP_CID_DYN_END; cid++) {
206 		if (!__l2cap_get_chan_by_scid(conn, cid))
207 			return cid;
208 	}
209 
210 	return 0;
211 }
212 
/* Set the channel state and notify the channel ops. Callers are
 * expected to hold the channel's socket lock (see l2cap_state_change()).
 */
static void __l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state);
}
221 
/* Locked wrapper: change channel state under the socket lock. */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_state_change(chan, state);
	release_sock(sk);
}
230 
/* Record @err on the channel's socket. Callers are expected to hold
 * the socket lock (see l2cap_chan_set_err()).
 */
static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	sk->sk_err = err;
}
237 
/* Locked wrapper: set the socket error under the socket lock. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
246 
/* Arm the ERTM retransmission timer if a timeout is configured.
 * NOTE(review): the pending check is on the *monitor* timer — this
 * looks intentional (ERTM runs only one of the two timers at a time),
 * but confirm against the ERTM state machine.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
255 
/* Switch from the retransmission timer to the monitor timer: the
 * retransmission timer is always stopped first.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
264 
/* Linear search of @head for the skb whose ERTM txseq equals @seq.
 * Returns NULL when no queued frame carries that sequence number.
 */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->control.txseq == seq)
			return skb;
	}

	return NULL;
}
277 
278 /* ---- L2CAP sequence number lists ---- */
279 
280 /* For ERTM, ordered lists of sequence numbers must be tracked for
281  * SREJ requests that are received and for frames that are to be
282  * retransmitted. These seq_list functions implement a singly-linked
283  * list in an array, where membership in the list can also be checked
284  * in constant time. Items can also be added to the tail of the list
285  * and removed from the head in constant time, without further memory
286  * allocs or frees.
287  */
288 
289 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
290 {
291 	size_t alloc_size, i;
292 
293 	/* Allocated size is a power of 2 to map sequence numbers
294 	 * (which may be up to 14 bits) in to a smaller array that is
295 	 * sized for the negotiated ERTM transmit windows.
296 	 */
297 	alloc_size = roundup_pow_of_two(size);
298 
299 	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
300 	if (!seq_list->list)
301 		return -ENOMEM;
302 
303 	seq_list->mask = alloc_size - 1;
304 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
305 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 	for (i = 0; i < alloc_size; i++)
307 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
308 
309 	return 0;
310 }
311 
/* Release the backing array of a sequence-number list. */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
316 
/* Report whether @seq is currently a member of the list. */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
323 
/* Remove @seq from the list. Removing the head is O(1); removing an
 * interior entry walks the singly-linked chain. Returns @seq on
 * success or L2CAP_SEQ_LIST_CLEAR if the list is empty or @seq is
 * not found.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* Removing the last entry empties the list */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
357 
/* Pop and return the head of the list, or L2CAP_SEQ_LIST_CLEAR when
 * the list is empty.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
363 
364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
365 {
366 	u16 i;
367 
368 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
369 		return;
370 
371 	for (i = 0; i <= seq_list->mask; i++)
372 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
373 
374 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
376 }
377 
/* Append @seq to the tail of the list in constant time. Duplicate
 * appends are silently ignored.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
395 
/* Delayed-work handler run when the channel timer expires: close the
 * channel with an error derived from its current state, then drop the
 * reference the timer held on the channel.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* Lock order: conn->chan_lock before the channel lock */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
425 
/* Allocate a new channel, add it to the global channel list and take
 * the initial reference. Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	/* NOTE(review): GFP_ATOMIC — looks like GFP_KERNEL would do if
	 * all callers may sleep; confirm the calling contexts.
	 */
	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
453 
/* kref release callback: unlink the channel from the global list and
 * free it. Invoked when the last reference is dropped via
 * l2cap_chan_put().
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
466 
/* Take a reference on the channel. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}
473 
/* Drop a reference on the channel; frees it via l2cap_chan_destroy()
 * when the count reaches zero.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
480 
481 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
482 {
483 	chan->fcs  = L2CAP_FCS_CRC16;
484 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
485 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
486 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
487 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
488 	chan->sec_level = BT_SECURITY_LOW;
489 
490 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
491 }
492 
/* Attach @chan to @conn: assign CIDs and MTUs based on the channel
 * type, initialise the local EFS parameters and link the channel into
 * the connection's channel list. Caller must hold conn->chan_lock
 * (see l2cap_chan_add()).
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_DEFAULT_MTU;
			if (chan->dcid == L2CAP_CID_ATT)
				chan->scid = L2CAP_CID_ATT;
			else
				chan->scid = l2cap_alloc_cid(conn);
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_FIX_A2MP:
		chan->scid = L2CAP_CID_A2MP;
		chan->dcid = L2CAP_CID_A2MP;
		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default EFS (extended flow spec) parameters */
	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* The connection's channel list owns a reference to the channel
	 * and the channel pins the HCI connection.
	 */
	l2cap_chan_hold(chan);

	hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
552 
/* Locked wrapper: attach @chan to @conn under conn->chan_lock. */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
559 
/* Detach @chan from its connection and tear it down: stop timers,
 * unlink it from the connection list, drop the references taken in
 * __l2cap_chan_add(), disconnect any AMP logical link and purge the
 * mode-specific queues.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drop the reference held by the connection's list */
		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* A2MP fixed channels do not pin the HCI connection */
		if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	chan->ops->teardown(chan, err);

	/* Nothing below was set up if configuration never completed */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch(chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);

		/* fall through */

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}

	return;
}
619 
/* Close @chan according to its current state: an established ACL
 * connection-oriented channel gets a graceful disconnect request; a
 * half-open incoming channel (BT_CONNECT2) is rejected first; other
 * states are simply deleted or torn down.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
	       sk);

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			/* Graceful shutdown: bound by the socket's send
			 * timeout while waiting for the disconnect rsp.
			 */
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Reject the pending incoming connection */
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
676 
677 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
678 {
679 	if (chan->chan_type == L2CAP_CHAN_RAW) {
680 		switch (chan->sec_level) {
681 		case BT_SECURITY_HIGH:
682 			return HCI_AT_DEDICATED_BONDING_MITM;
683 		case BT_SECURITY_MEDIUM:
684 			return HCI_AT_DEDICATED_BONDING;
685 		default:
686 			return HCI_AT_NO_BONDING;
687 		}
688 	} else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
689 		if (chan->sec_level == BT_SECURITY_LOW)
690 			chan->sec_level = BT_SECURITY_SDP;
691 
692 		if (chan->sec_level == BT_SECURITY_HIGH)
693 			return HCI_AT_NO_BONDING_MITM;
694 		else
695 			return HCI_AT_NO_BONDING;
696 	} else {
697 		switch (chan->sec_level) {
698 		case BT_SECURITY_HIGH:
699 			return HCI_AT_GENERAL_BONDING_MITM;
700 		case BT_SECURITY_MEDIUM:
701 			return HCI_AT_GENERAL_BONDING;
702 		default:
703 			return HCI_AT_NO_BONDING;
704 		}
705 	}
706 }
707 
/* Service level security: request (or verify) the link security the
 * channel requires. Returns the result of hci_conn_security().
 */
int l2cap_chan_check_security(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
}
718 
/* Allocate the next signalling command identifier for @conn,
 * wrapping within the kernel-owned range 1-128.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock(&conn->lock);

	return id;
}
740 
/* Build an L2CAP signalling command and queue it on the HCI channel
 * at maximum priority. Silently drops the command if the skb cannot
 * be built.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Prefer non-flushable packets when the controller supports them */
	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
762 
763 static bool __chan_is_moving(struct l2cap_chan *chan)
764 {
765 	return chan->move_state != L2CAP_MOVE_STABLE &&
766 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
767 }
768 
/* Transmit @skb on the channel's current transport: the AMP
 * high-speed link when one is active and the channel is not mid-move,
 * otherwise the BR/EDR ACL link.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		/* Drop the frame if the logical link is not up yet */
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb);

		return;
	}

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
795 
796 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
797 {
798 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
799 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
800 
801 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
802 		/* S-Frame */
803 		control->sframe = 1;
804 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
805 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
806 
807 		control->sar = 0;
808 		control->txseq = 0;
809 	} else {
810 		/* I-Frame */
811 		control->sframe = 0;
812 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
813 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
814 
815 		control->poll = 0;
816 		control->super = 0;
817 	}
818 }
819 
820 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
821 {
822 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
823 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
824 
825 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
826 		/* S-Frame */
827 		control->sframe = 1;
828 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
829 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
830 
831 		control->sar = 0;
832 		control->txseq = 0;
833 	} else {
834 		/* I-Frame */
835 		control->sframe = 0;
836 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
837 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
838 
839 		control->poll = 0;
840 		control->super = 0;
841 	}
842 }
843 
/* Parse and strip the control field at the head of @skb into the skb's
 * control block, using the extended or enhanced layout depending on the
 * channel's negotiated flags.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
857 
858 static u32 __pack_extended_control(struct l2cap_ctrl *control)
859 {
860 	u32 packed;
861 
862 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
863 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
864 
865 	if (control->sframe) {
866 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
867 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
868 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
869 	} else {
870 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
871 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
872 	}
873 
874 	return packed;
875 }
876 
877 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
878 {
879 	u16 packed;
880 
881 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
882 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
883 
884 	if (control->sframe) {
885 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
886 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
887 		packed |= L2CAP_CTRL_FRAME_TYPE;
888 	} else {
889 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
890 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
891 	}
892 
893 	return packed;
894 }
895 
/* Write the encoded control field into @skb just after the L2CAP
 * basic header, using the layout selected by the channel's flags.
 */
static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}
908 
909 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
910 {
911 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
912 		return L2CAP_EXT_HDR_SIZE;
913 	else
914 		return L2CAP_ENH_HDR_SIZE;
915 }
916 
/* Build an S-frame PDU carrying the already-encoded @control field,
 * appending an FCS when the channel uses CRC16. Returns the skb or
 * ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	/* FCS covers everything written so far */
	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
949 
/* Send a supervisory frame, updating the ERTM bookkeeping that goes
 * with it (F-bit piggybacking, RNR-sent tracking and the ack timer).
 * Does nothing for non-S-frames or while the channel is moving.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	/* Piggyback a pending F-bit unless this frame polls */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* Anything but an SREJ acknowledges up to reqseq */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
990 
/* Send an RR (or RNR while locally busy) S-frame acknowledging the
 * current buffer_seq, optionally with the poll bit set.
 */
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p, poll %d", chan, poll);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.poll = poll;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		control.super = L2CAP_SUPER_RNR;
	else
		control.super = L2CAP_SUPER_RR;

	control.reqseq = chan->buffer_seq;
	l2cap_send_sframe(chan, &control);
}
1009 
/* True when no connect request is outstanding for this channel. */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
1014 
1015 static bool __amp_capable(struct l2cap_chan *chan)
1016 {
1017 	struct l2cap_conn *conn = chan->conn;
1018 
1019 	if (enable_hs &&
1020 	    hci_amp_capable() &&
1021 	    chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED &&
1022 	    conn->fixed_chan_mask & L2CAP_FC_A2MP)
1023 		return true;
1024 	else
1025 		return false;
1026 }
1027 
/* Validate the channel's EFS parameters.
 * Stub: no validation is implemented yet, all parameters are accepted.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
1033 
/* Send an L2CAP Connection Request for @chan, recording the command
 * identifier and marking the connect as pending.
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1048 
/* Send an L2CAP Create Channel Request targeting AMP controller
 * @amp_id, recording the command identifier on the channel.
 */
static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
{
	struct l2cap_create_chan_req req;
	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;
	req.amp_id = amp_id;

	chan->ident = l2cap_get_ident(chan->conn);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
		       sizeof(req), &req);
}
1061 
/* Prepare an ERTM channel for an AMP move: stop timers, reset
 * retransmission state and park the TX/RX state machines until the
 * move completes. No-op for non-ERTM channels.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Reset retry counts on already-sent frames; stop at the first
	 * frame that was never transmitted (retries == 0).
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->control.retries)
			bt_cb(skb)->control.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Hold off transmission until the move finishes */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1096 
/* Finish an AMP move: return to the stable move state and, for ERTM
 * channels, resynchronise with the peer (initiator polls, responder
 * waits for the poll).
 */
static void l2cap_move_done(struct l2cap_chan *chan)
{
	u8 move_role = chan->move_role;
	BT_DBG("chan %p", chan);

	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	switch (move_role) {
	case L2CAP_MOVE_ROLE_INITIATOR:
		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
		break;
	case L2CAP_MOVE_ROLE_RESPONDER:
		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
		break;
	}
}
1118 
/* Mark the channel fully connected and notify the channel ops. */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1129 
/* Begin connection establishment: discover AMP controllers first when
 * the channel prefers AMP, otherwise send a plain connect request.
 */
static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (__amp_capable(chan)) {
		BT_DBG("chan %p AMP capable: discover AMPs", chan);
		a2mp_discover_amp(chan);
	} else {
		l2cap_send_conn_req(chan);
	}
}
1139 
/* Drive connection establishment for a channel.  LE channels become
 * ready as soon as the link is up.  For BR/EDR, the remote feature mask
 * must be known first: send an Information Request if none is in
 * flight, otherwise wait for it to complete before sending the
 * Connection Request.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_chan_ready(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature mask exchange still in flight; try again when
		 * it finishes.
		 */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
		    __l2cap_no_conn_pending(chan)) {
			l2cap_start_connection(chan);
		}
	} else {
		struct l2cap_info_req req;
		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Bound the wait for the Information Response */
		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(req), &req);
	}
}
1170 
1171 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1172 {
1173 	u32 local_feat_mask = l2cap_feat_mask;
1174 	if (!disable_ertm)
1175 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1176 
1177 	switch (mode) {
1178 	case L2CAP_MODE_ERTM:
1179 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1180 	case L2CAP_MODE_STREAMING:
1181 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1182 	default:
1183 		return 0x00;
1184 	}
1185 }
1186 
/* Tear down a channel: send an L2CAP Disconnection Request and move the
 * channel to BT_DISCONN with @err as its pending socket error.  A2MP
 * fixed channels carry no dcid/scid, so for those only the state is
 * changed.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* Stop all ERTM timers before leaving the connected state */
	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	/* State change and error are set under the socket lock */
	lock_sock(sk);
	__l2cap_state_change(chan, BT_DISCONN);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
1217 
1218 /* ---- L2CAP connections ---- */
/* Walk all channels on @conn and advance their connection setup:
 * outgoing channels (BT_CONNECT) get a Connection Request once security
 * is settled; incoming channels (BT_CONNECT2) get their pending
 * Connection Response (success, authorization pending or authentication
 * pending) and, on success, the first Configure Request.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Requested mode is unsupported by the remote
			 * and this channel cannot fall back to another
			 * mode: give up on it.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				lock_sock(sk);
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
				}
				release_sock(sk);
			} else {
				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Start configuration only if the response was
			 * a success and no Configure Request has been
			 * sent yet.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1299 
/* Find a channel with the given scid and source/destination bdaddr.
 * Returns an exact address match if one exists, otherwise the closest
 * match where one or both addresses are BDADDR_ANY.  The chan_list
 * read lock is dropped before returning.
 */
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
						    bdaddr_t *src,
						    bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (state && c->state != state)
			continue;

		if (c->scid == cid) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
1342 
/* Handle a newly ready LE link: if a socket is listening on the ATT
 * fixed channel, spawn a child channel for it and attach it to @conn.
 * Called with conn->chan_lock held (from l2cap_conn_ready()).
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
					  conn->src, conn->dst);
	if (!pchan)
		return;

	/* Client ATT sockets should override the server one */
	if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
		return;

	parent = pchan->sk;

	lock_sock(parent);

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	chan->dcid = L2CAP_CID_ATT;

	/* Child channel inherits the link's addresses */
	bacpy(&bt_sk(chan->sk)->src, conn->src);
	bacpy(&bt_sk(chan->sk)->dst, conn->dst);

	__l2cap_chan_add(conn, chan);

clean:
	release_sock(parent);
}
1378 
/* The HCI link carrying @conn is up: mark LE channels ready (after
 * requesting security), mark non-connection-oriented channels connected
 * directly, and kick off setup for outgoing connection-oriented
 * channels.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out && hcon->type == LE_LINK)
		smp_conn_security(hcon, hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* Skip the A2MP fixed channel */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			if (smp_conn_security(hcon, chan->sec_level))
				l2cap_chan_ready(chan);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			struct sock *sk = chan->sk;
			__clear_chan_timer(chan);
			lock_sock(sk);
			__l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
			release_sock(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1426 
/* Notify sockets that we cannot guarantee reliability anymore: set
 * @err on every channel that requested reliable delivery.
 */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			l2cap_chan_set_err(chan, err);
	}

	mutex_unlock(&conn->chan_lock);
}
1443 
/* The Information Request timed out: mark the feature mask exchange as
 * finished (without a response) and let waiting channels proceed with
 * connection setup.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1454 
1455 /*
1456  * l2cap_user
1457  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1458  * callback is called during registration. The ->remove callback is called
1459  * during unregistration.
1460  * An l2cap_user object can either be explicitly unregistered or when the
1461  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1462  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1463  * External modules must own a reference to the l2cap_conn object if they intend
1464  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1465  * any time if they don't.
1466  */
1467 
/* Attach an l2cap_user to @conn and invoke its ->probe() callback.
 * Returns 0 on success, -EINVAL if the user is already registered,
 * -ENODEV if the connection is already being torn down, or the error
 * returned by ->probe().
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* Non-NULL list pointers mean the user is already registered */
	if (user->list.next || user->list.prev) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1505 
1506 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1507 {
1508 	struct hci_dev *hdev = conn->hcon->hdev;
1509 
1510 	hci_dev_lock(hdev);
1511 
1512 	if (!user->list.next || !user->list.prev)
1513 		goto out_unlock;
1514 
1515 	list_del(&user->list);
1516 	user->list.next = NULL;
1517 	user->list.prev = NULL;
1518 	user->remove(conn, user);
1519 
1520 out_unlock:
1521 	hci_dev_unlock(hdev);
1522 }
1523 EXPORT_SYMBOL(l2cap_unregister_user);
1524 
/* Remove and notify every registered l2cap_user on @conn.  Clearing
 * the list pointers marks each user as unregistered, so a later
 * l2cap_unregister_user() call on the same user becomes a no-op.
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del(&user->list);
		user->list.next = NULL;
		user->list.prev = NULL;
		user->remove(conn, user);
	}
}
1537 
/* Tear down the L2CAP layer of @hcon: notify users, close every
 * channel with @err, delete the HCI channel, stop pending timers and
 * drop the l2cap_conn reference.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	l2cap_unregister_all_users(conn);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so it survives l2cap_chan_del()
		 * until ops->close() has run.
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1583 
/* The SMP security procedure timed out: destroy the SMP context and
 * tear down the LE connection with ETIMEDOUT.  The test_and_clear
 * guards against racing with l2cap_conn_del().
 */
static void security_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       security_timer.work);

	BT_DBG("conn %p", conn);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
		smp_chan_destroy(conn);
		l2cap_conn_del(conn->hcon, ETIMEDOUT);
	}
}
1596 
/* Return the l2cap_conn attached to @hcon, allocating and initializing
 * a new one (plus its HCI channel) if none exists yet.  Takes a
 * reference on the underlying hci_conn.  Returns NULL on allocation
 * failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	hci_conn_get(conn->hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* LE links use the LE MTU when the controller reports one */
	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	/* security_timer and info_timer share storage; only one is
	 * ever used depending on the link type.
	 */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
1655 
/* kref release callback: drop the hci_conn reference taken in
 * l2cap_conn_add() and free the connection object.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1663 
/* Take a reference on @conn. */
void l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
}
EXPORT_SYMBOL(l2cap_conn_get);
1669 
/* Drop a reference on @conn; the last put frees it via
 * l2cap_conn_free().
 */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1675 
1676 /* ---- Socket interface ---- */
1677 
1678 /* Find socket with psm and source / destination bdaddr.
1679  * Returns closest match.
1680  */
1681 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1682 						   bdaddr_t *src,
1683 						   bdaddr_t *dst)
1684 {
1685 	struct l2cap_chan *c, *c1 = NULL;
1686 
1687 	read_lock(&chan_list_lock);
1688 
1689 	list_for_each_entry(c, &chan_list, global_l) {
1690 		struct sock *sk = c->sk;
1691 
1692 		if (state && c->state != state)
1693 			continue;
1694 
1695 		if (c->psm == psm) {
1696 			int src_match, dst_match;
1697 			int src_any, dst_any;
1698 
1699 			/* Exact match. */
1700 			src_match = !bacmp(&bt_sk(sk)->src, src);
1701 			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1702 			if (src_match && dst_match) {
1703 				read_unlock(&chan_list_lock);
1704 				return c;
1705 			}
1706 
1707 			/* Closest match */
1708 			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1709 			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1710 			if ((src_match && dst_any) || (src_any && dst_match) ||
1711 			    (src_any && dst_any))
1712 				c1 = c;
1713 		}
1714 	}
1715 
1716 	read_unlock(&chan_list_lock);
1717 
1718 	return c1;
1719 }
1720 
/* Initiate an outgoing L2CAP connection on @chan to @dst/@dst_type,
 * identified by @psm (PSM-based channels) or @cid (fixed channels).
 * Creates or reuses the underlying ACL or LE link.  Returns 0 on
 * success (including when a connection attempt is already in progress)
 * or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need either a PSM or a CID */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	/* ERTM and streaming modes are only valid when not disabled */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	lock_sock(sk);
	bacpy(&bt_sk(sk)->dst, dst);
	release_sock(sk);

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	if (bdaddr_type_is_le(dst_type))
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* A fixed CID may only be used once per connection */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* l2cap_chan_add() takes conn->chan_lock, so the channel lock
	 * must be released around it to keep the locking order.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1856 
/* Sleep (interruptibly, releasing the socket lock while blocked) until
 * all outstanding ERTM I-frames have been acked or the channel loses
 * its connection.  Returns 0, a signal errno, or a pending socket
 * error.  Called with the socket locked.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the socket lock while sleeping so acks can be
		 * processed.
		 */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1888 
1889 static void l2cap_monitor_timeout(struct work_struct *work)
1890 {
1891 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1892 					       monitor_timer.work);
1893 
1894 	BT_DBG("chan %p", chan);
1895 
1896 	l2cap_chan_lock(chan);
1897 
1898 	if (!chan->conn) {
1899 		l2cap_chan_unlock(chan);
1900 		l2cap_chan_put(chan);
1901 		return;
1902 	}
1903 
1904 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1905 
1906 	l2cap_chan_unlock(chan);
1907 	l2cap_chan_put(chan);
1908 }
1909 
1910 static void l2cap_retrans_timeout(struct work_struct *work)
1911 {
1912 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1913 					       retrans_timer.work);
1914 
1915 	BT_DBG("chan %p", chan);
1916 
1917 	l2cap_chan_lock(chan);
1918 
1919 	if (!chan->conn) {
1920 		l2cap_chan_unlock(chan);
1921 		l2cap_chan_put(chan);
1922 		return;
1923 	}
1924 
1925 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1926 	l2cap_chan_unlock(chan);
1927 	l2cap_chan_put(chan);
1928 }
1929 
/* Transmit the PDUs in @skbs over a streaming-mode channel: append
 * them to the tx queue and immediately send everything queued,
 * assigning sequence numbers and appending an FCS when configured.
 * Transmission is deferred while an AMP channel move is in progress.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Streaming mode never acks, so reqseq is always 0 */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1968 
/* Transmit new I-frames from the ERTM tx queue while the remote window
 * has room and the tx state machine is in XMIT.  Each frame is cloned
 * before sending so the original remains queued for retransmission.
 * Returns the number of frames sent, 0 when transmission is blocked
 * (remote busy, channel move), or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* I-frames piggyback an ack for everything received */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2038 
/* Retransmit every frame whose sequence number is queued on
 * chan->retrans_list.  Disconnects the channel when a frame exceeds
 * the chan->max_tx retransmission limit.  Suppressed while the remote
 * is busy or an AMP channel move is in progress.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		/* max_tx == 0 means an unlimited number of retries */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the piggybacked ack and F-bit in the local
		 * copy of the control field.
		 */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2117 
/* Queue the single frame identified by control->reqseq for
 * retransmission and send it.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2126 
/* Queue every unacked frame from control->reqseq up to (but not
 * including) tx_send_head for retransmission and resend them.  A poll
 * (P-bit) from the peer requests the F-bit on the next outgoing frame.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame at or after reqseq that is
		 * still awaiting an ack.
		 */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2160 
/* Acknowledge received I-frames.  If the local side is busy, an RNR
 * S-frame goes out immediately.  Otherwise the ack is piggybacked on
 * pending I-frames when possible; an explicit RR is sent once the
 * number of unacked frames reaches 3/4 of the ack window, else the ack
 * timer is armed to batch it.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2210 
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into the skb's linear area, the remainder into MTU-sized
 * fragment skbs chained on frag_list.  Returns the number of bytes
 * copied or a negative errno.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len  -= count;

		/* Account fragment data in the head skb's totals */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2255 
/* Build a connectionless (G-frame) PDU from user data in @msg: L2CAP
 * header plus PSM, followed by the payload (fragmented to the link
 * MTU).  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
						 u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu priority %u", chan, len, priority);

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2289 
/* Build a basic-mode (B-frame) PDU from user data in @msg: L2CAP
 * header followed by the payload (fragmented to the link MTU).
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len,
					      u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2322 
/* Build one ERTM/streaming-mode I-frame PDU of @len payload bytes.
 * Layout: basic L2CAP header, enhanced or extended control field
 * (zeroed here, populated at transmit time), optional SDU length
 * (present only for the first PDU of a segmented SDU, i.e. when
 * @sdulen is nonzero), then the payload.  Space for the FCS is
 * accounted for in @hlen when CRC16 is in use; the FCS value itself
 * is not written here — assumed to be appended on transmit (TODO
 * confirm against the send path).
 *
 * Returns the new skb, or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Base header size depends on enhanced vs extended control */
	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* Stash per-frame metadata for the ERTM state machine */
	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2376 
/* Segment an outgoing SDU of @len bytes into a queue of I-frame PDUs,
 * tagging each with the appropriate SAR value (UNSEGMENTED, or
 * START/CONTINUE/END).  On any allocation/copy failure the partially
 * built queue is purged and the error is returned.
 *
 * Returns 0 on success or a negative errno.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU: no SDU length field needed */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First PDU of a segmented SDU carries the total SDU
		 * length, which costs L2CAP_SDULEN_SIZE payload bytes.
		 */
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the first PDU carries the SDU length;
			 * later PDUs regain those payload bytes.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2446 
/* Send @len bytes from @msg on @chan, dispatching on the channel type
 * and mode (connectionless, basic, ERTM or streaming).
 *
 * Returns the number of bytes accepted for transmission, or a negative
 * errno (-EMSGSIZE if @len exceeds the outgoing MTU, -ENOTCONN if the
 * channel closed during segmentation, -EBADFD for an unknown mode,
 * or an allocation/copy error from PDU construction).
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2526 
/* Send an SREJ S-frame for every missing sequence number from
 * expected_tx_seq up to (but not including) @txseq, the out-of-order
 * frame that just arrived.  Each requested seq is recorded in
 * srej_list so later retransmissions can be matched.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Skip frames already held out-of-order in srej_q */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	/* Resume expecting the frame after the one that triggered us */
	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2549 
2550 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2551 {
2552 	struct l2cap_ctrl control;
2553 
2554 	BT_DBG("chan %p", chan);
2555 
2556 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2557 		return;
2558 
2559 	memset(&control, 0, sizeof(control));
2560 	control.sframe = 1;
2561 	control.super = L2CAP_SUPER_SREJ;
2562 	control.reqseq = chan->srej_list.tail;
2563 	l2cap_send_sframe(chan, &control);
2564 }
2565 
/* Re-send SREJ S-frames for every outstanding missing frame except
 * @txseq (which was just received).  Each entry is popped, re-sent
 * and re-appended, so the head comparison below guarantees exactly
 * one pass over the list.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		/* Stop at the just-received seq (it is NOT re-appended,
		 * which removes it from the outstanding set) or when the
		 * list is exhausted.
		 */
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2591 
/* Process an incoming acknowledgement (ReqSeq).  Frees every
 * transmitted frame with a sequence number before @reqseq from the
 * tx queue, updates expected_ack_seq and stops the retransmission
 * timer once nothing remains unacknowledged.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing to do if nothing is in flight or this ack is stale */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2623 
/* Abandon the SREJ_SENT receive state: drop the out-of-order frames
 * buffered in srej_q, forget outstanding SREJ requests, rewind
 * expected_tx_seq to the last in-order point (buffer_seq) and return
 * the receiver to the normal RECV state.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2633 
/* Handle an ERTM transmit state machine event while in the XMIT
 * (normal transmission) state.  Events that send a poll (P=1) frame
 * move the machine to WAIT_F until the matching final bit arrives.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* New outbound PDUs: append to tx_q and transmit now */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We told the remote we were busy (RNR); poll it
			 * with an RR (P=1) so transmission resumes.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the remote for its state */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2705 
2706 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2707 				  struct l2cap_ctrl *control,
2708 				  struct sk_buff_head *skbs, u8 event)
2709 {
2710 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2711 	       event);
2712 
2713 	switch (event) {
2714 	case L2CAP_EV_DATA_REQUEST:
2715 		if (chan->tx_send_head == NULL)
2716 			chan->tx_send_head = skb_peek(skbs);
2717 		/* Queue data, but don't send. */
2718 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2719 		break;
2720 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2721 		BT_DBG("Enter LOCAL_BUSY");
2722 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2723 
2724 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2725 			/* The SREJ_SENT state must be aborted if we are to
2726 			 * enter the LOCAL_BUSY state.
2727 			 */
2728 			l2cap_abort_rx_srej_sent(chan);
2729 		}
2730 
2731 		l2cap_send_ack(chan);
2732 
2733 		break;
2734 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2735 		BT_DBG("Exit LOCAL_BUSY");
2736 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2737 
2738 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2739 			struct l2cap_ctrl local_control;
2740 			memset(&local_control, 0, sizeof(local_control));
2741 			local_control.sframe = 1;
2742 			local_control.super = L2CAP_SUPER_RR;
2743 			local_control.poll = 1;
2744 			local_control.reqseq = chan->buffer_seq;
2745 			l2cap_send_sframe(chan, &local_control);
2746 
2747 			chan->retry_count = 1;
2748 			__set_monitor_timer(chan);
2749 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2750 		}
2751 		break;
2752 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2753 		l2cap_process_reqseq(chan, control->reqseq);
2754 
2755 		/* Fall through */
2756 
2757 	case L2CAP_EV_RECV_FBIT:
2758 		if (control && control->final) {
2759 			__clear_monitor_timer(chan);
2760 			if (chan->unacked_frames > 0)
2761 				__set_retrans_timer(chan);
2762 			chan->retry_count = 0;
2763 			chan->tx_state = L2CAP_TX_STATE_XMIT;
2764 			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2765 		}
2766 		break;
2767 	case L2CAP_EV_EXPLICIT_POLL:
2768 		/* Ignore */
2769 		break;
2770 	case L2CAP_EV_MONITOR_TO:
2771 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2772 			l2cap_send_rr_or_rnr(chan, 1);
2773 			__set_monitor_timer(chan);
2774 			chan->retry_count++;
2775 		} else {
2776 			l2cap_send_disconn_req(chan, ECONNABORTED);
2777 		}
2778 		break;
2779 	default:
2780 		break;
2781 	}
2782 }
2783 
2784 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2785 		     struct sk_buff_head *skbs, u8 event)
2786 {
2787 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2788 	       chan, control, skbs, event, chan->tx_state);
2789 
2790 	switch (chan->tx_state) {
2791 	case L2CAP_TX_STATE_XMIT:
2792 		l2cap_tx_state_xmit(chan, control, skbs, event);
2793 		break;
2794 	case L2CAP_TX_STATE_WAIT_F:
2795 		l2cap_tx_state_wait_f(chan, control, skbs, event);
2796 		break;
2797 	default:
2798 		/* Ignore event */
2799 		break;
2800 	}
2801 }
2802 
/* Feed a received frame's ReqSeq and F-bit into the tx state machine */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2809 
/* Feed only a received frame's F-bit into the tx state machine */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2816 
2817 /* Copy frame to all raw sockets on that connection */
2818 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2819 {
2820 	struct sk_buff *nskb;
2821 	struct l2cap_chan *chan;
2822 
2823 	BT_DBG("conn %p", conn);
2824 
2825 	mutex_lock(&conn->chan_lock);
2826 
2827 	list_for_each_entry(chan, &conn->chan_l, list) {
2828 		struct sock *sk = chan->sk;
2829 		if (chan->chan_type != L2CAP_CHAN_RAW)
2830 			continue;
2831 
2832 		/* Don't send frame to the socket it came from */
2833 		if (skb->sk == sk)
2834 			continue;
2835 		nskb = skb_clone(skb, GFP_KERNEL);
2836 		if (!nskb)
2837 			continue;
2838 
2839 		if (chan->ops->recv(chan, nskb))
2840 			kfree_skb(nskb);
2841 	}
2842 
2843 	mutex_unlock(&conn->chan_lock);
2844 }
2845 
2846 /* ---- L2CAP signalling commands ---- */
2847 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2848 				       u8 ident, u16 dlen, void *data)
2849 {
2850 	struct sk_buff *skb, **frag;
2851 	struct l2cap_cmd_hdr *cmd;
2852 	struct l2cap_hdr *lh;
2853 	int len, count;
2854 
2855 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2856 	       conn, code, ident, dlen);
2857 
2858 	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2859 		return NULL;
2860 
2861 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2862 	count = min_t(unsigned int, conn->mtu, len);
2863 
2864 	skb = bt_skb_alloc(count, GFP_KERNEL);
2865 	if (!skb)
2866 		return NULL;
2867 
2868 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2869 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2870 
2871 	if (conn->hcon->type == LE_LINK)
2872 		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2873 	else
2874 		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2875 
2876 	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2877 	cmd->code  = code;
2878 	cmd->ident = ident;
2879 	cmd->len   = cpu_to_le16(dlen);
2880 
2881 	if (dlen) {
2882 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2883 		memcpy(skb_put(skb, count), data, count);
2884 		data += count;
2885 	}
2886 
2887 	len -= skb->len;
2888 
2889 	/* Continuation fragments (no L2CAP header) */
2890 	frag = &skb_shinfo(skb)->frag_list;
2891 	while (len) {
2892 		count = min_t(unsigned int, conn->mtu, len);
2893 
2894 		*frag = bt_skb_alloc(count, GFP_KERNEL);
2895 		if (!*frag)
2896 			goto fail;
2897 
2898 		memcpy(skb_put(*frag, count), data, count);
2899 
2900 		len  -= count;
2901 		data += count;
2902 
2903 		frag = &(*frag)->next;
2904 	}
2905 
2906 	return skb;
2907 
2908 fail:
2909 	kfree_skb(skb);
2910 	return NULL;
2911 }
2912 
/* Parse one configuration option at *@ptr, returning its type, length
 * and value, and advance *@ptr past it.  Values of 1/2/4 bytes are
 * decoded little-endian into *@val; for any other length *@val holds
 * a pointer to the raw option bytes.
 *
 * Returns the total number of bytes consumed (header + value).
 *
 * NOTE(review): opt->len comes from the remote peer and is not checked
 * here against the bytes remaining in the request buffer — callers
 * subtract the return value from their running length, but a crafted
 * option length could make pointer-valued options reference bytes past
 * the option's end.  Verify the callers bound this; upstream later
 * hardened this path.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer to the data */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
2946 
/* Append one configuration option (type/len/value) at *@ptr and
 * advance *@ptr past it.  Values of 1/2/4 bytes are encoded
 * little-endian from @val; for any other length @val is treated as a
 * pointer to @len raw bytes to copy.
 *
 * NOTE(review): the caller must guarantee the output buffer has at
 * least L2CAP_CONF_OPT_SIZE + @len bytes remaining — no bound is
 * checked here.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length option: @val is a pointer to the bytes */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
2976 
/* Append an Extended Flow Specification (EFS) option describing this
 * channel's QoS parameters.  ERTM channels advertise their negotiated
 * local values; streaming channels use best-effort defaults.  Other
 * modes get no EFS option at all.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id		= chan->local_id;
		efs.stype	= chan->local_stype;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to	= __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id		= 1;
		efs.stype	= L2CAP_SERV_BESTEFFORT;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= 0;
		efs.flush_to	= 0;
		break;

	default:
		/* No EFS for other modes */
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs);
}
3007 
/* Delayed-work handler for the ERTM ack timer: if any received frames
 * have not yet been acknowledged, send an RR (or RNR when locally
 * busy).  Drops the channel reference — presumably the one taken when
 * the timer was armed (verify against the timer setup macros).
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Number of frames received but not yet acked */
	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
3027 
/* Initialize sequence counters, queues, timers and SREJ/retransmit
 * sequence lists for a channel entering ERTM or streaming mode.
 * Streaming mode needs only the counters and tx queue; the ERTM-only
 * state (timers, srej/retrans lists) is set up after the early return.
 *
 * Returns 0 on success or a negative errno from sequence-list
 * allocation.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* AMP channel-move state starts out stable on the BR/EDR radio */
	chan->local_amp_id = 0;
	chan->move_id = 0;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	/* On failure, undo the srej_list allocation above */
	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3072 
3073 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3074 {
3075 	switch (mode) {
3076 	case L2CAP_MODE_STREAMING:
3077 	case L2CAP_MODE_ERTM:
3078 		if (l2cap_mode_supported(mode, remote_feat_mask))
3079 			return mode;
3080 		/* fall through */
3081 	default:
3082 		return L2CAP_MODE_BASIC;
3083 	}
3084 }
3085 
3086 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
3087 {
3088 	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3089 }
3090 
3091 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
3092 {
3093 	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3094 }
3095 
/* Fill in the ERTM retransmission and monitor timeouts for an RFC
 * option.  AMP channels derive them from the controller's best-effort
 * flush timeout; BR/EDR channels use the spec defaults.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit field */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3133 
3134 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3135 {
3136 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3137 	    __l2cap_ews_supported(chan)) {
3138 		/* use extended control field */
3139 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3140 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3141 	} else {
3142 		chan->tx_win = min_t(u16, chan->tx_win,
3143 				     L2CAP_DEFAULT_TX_WINDOW);
3144 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3145 	}
3146 	chan->ack_win = chan->tx_win;
3147 }
3148 
/* Build an outgoing Configure Request in @data: select the channel
 * mode (possibly downgrading based on remote features), then emit the
 * MTU, RFC, EFS, EWS and FCS options appropriate for that mode.
 *
 * Returns the number of bytes written (request header plus options).
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection only happens on the first config exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* State-2 devices keep their configured mode as-is */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only advertise a non-default MTU */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* A basic-mode RFC option is only needed when the remote
		 * supports the other modes and must be told we want basic.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* PDU size bounded by the HCI MTU minus worst-case
		 * L2CAP overhead (extended header, SDU length, FCS).
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		/* The RFC option's window field is limited to the
		 * default range; larger windows go in the EWS option.
		 */
		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3266 
3267 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3268 {
3269 	struct l2cap_conf_rsp *rsp = data;
3270 	void *ptr = rsp->data;
3271 	void *req = chan->conf_req;
3272 	int len = chan->conf_len;
3273 	int type, hint, olen;
3274 	unsigned long val;
3275 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3276 	struct l2cap_conf_efs efs;
3277 	u8 remote_efs = 0;
3278 	u16 mtu = L2CAP_DEFAULT_MTU;
3279 	u16 result = L2CAP_CONF_SUCCESS;
3280 	u16 size;
3281 
3282 	BT_DBG("chan %p", chan);
3283 
3284 	while (len >= L2CAP_CONF_OPT_SIZE) {
3285 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3286 
3287 		hint  = type & L2CAP_CONF_HINT;
3288 		type &= L2CAP_CONF_MASK;
3289 
3290 		switch (type) {
3291 		case L2CAP_CONF_MTU:
3292 			mtu = val;
3293 			break;
3294 
3295 		case L2CAP_CONF_FLUSH_TO:
3296 			chan->flush_to = val;
3297 			break;
3298 
3299 		case L2CAP_CONF_QOS:
3300 			break;
3301 
3302 		case L2CAP_CONF_RFC:
3303 			if (olen == sizeof(rfc))
3304 				memcpy(&rfc, (void *) val, olen);
3305 			break;
3306 
3307 		case L2CAP_CONF_FCS:
3308 			if (val == L2CAP_FCS_NONE)
3309 				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3310 			break;
3311 
3312 		case L2CAP_CONF_EFS:
3313 			remote_efs = 1;
3314 			if (olen == sizeof(efs))
3315 				memcpy(&efs, (void *) val, olen);
3316 			break;
3317 
3318 		case L2CAP_CONF_EWS:
3319 			if (!enable_hs)
3320 				return -ECONNREFUSED;
3321 
3322 			set_bit(FLAG_EXT_CTRL, &chan->flags);
3323 			set_bit(CONF_EWS_RECV, &chan->conf_state);
3324 			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3325 			chan->remote_tx_win = val;
3326 			break;
3327 
3328 		default:
3329 			if (hint)
3330 				break;
3331 
3332 			result = L2CAP_CONF_UNKNOWN;
3333 			*((u8 *) ptr++) = type;
3334 			break;
3335 		}
3336 	}
3337 
3338 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
3339 		goto done;
3340 
3341 	switch (chan->mode) {
3342 	case L2CAP_MODE_STREAMING:
3343 	case L2CAP_MODE_ERTM:
3344 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3345 			chan->mode = l2cap_select_mode(rfc.mode,
3346 						       chan->conn->feat_mask);
3347 			break;
3348 		}
3349 
3350 		if (remote_efs) {
3351 			if (__l2cap_efs_supported(chan))
3352 				set_bit(FLAG_EFS_ENABLE, &chan->flags);
3353 			else
3354 				return -ECONNREFUSED;
3355 		}
3356 
3357 		if (chan->mode != rfc.mode)
3358 			return -ECONNREFUSED;
3359 
3360 		break;
3361 	}
3362 
3363 done:
3364 	if (chan->mode != rfc.mode) {
3365 		result = L2CAP_CONF_UNACCEPT;
3366 		rfc.mode = chan->mode;
3367 
3368 		if (chan->num_conf_rsp == 1)
3369 			return -ECONNREFUSED;
3370 
3371 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3372 				   (unsigned long) &rfc);
3373 	}
3374 
3375 	if (result == L2CAP_CONF_SUCCESS) {
3376 		/* Configure output options and let the other side know
3377 		 * which ones we don't like. */
3378 
3379 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
3380 			result = L2CAP_CONF_UNACCEPT;
3381 		else {
3382 			chan->omtu = mtu;
3383 			set_bit(CONF_MTU_DONE, &chan->conf_state);
3384 		}
3385 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3386 
3387 		if (remote_efs) {
3388 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3389 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3390 			    efs.stype != chan->local_stype) {
3391 
3392 				result = L2CAP_CONF_UNACCEPT;
3393 
3394 				if (chan->num_conf_req >= 1)
3395 					return -ECONNREFUSED;
3396 
3397 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3398 						   sizeof(efs),
3399 						   (unsigned long) &efs);
3400 			} else {
3401 				/* Send PENDING Conf Rsp */
3402 				result = L2CAP_CONF_PENDING;
3403 				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3404 			}
3405 		}
3406 
3407 		switch (rfc.mode) {
3408 		case L2CAP_MODE_BASIC:
3409 			chan->fcs = L2CAP_FCS_NONE;
3410 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3411 			break;
3412 
3413 		case L2CAP_MODE_ERTM:
3414 			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3415 				chan->remote_tx_win = rfc.txwin_size;
3416 			else
3417 				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3418 
3419 			chan->remote_max_tx = rfc.max_transmit;
3420 
3421 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3422 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3423 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3424 			rfc.max_pdu_size = cpu_to_le16(size);
3425 			chan->remote_mps = size;
3426 
3427 			__l2cap_set_ertm_timeouts(chan, &rfc);
3428 
3429 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3430 
3431 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3432 					   sizeof(rfc), (unsigned long) &rfc);
3433 
3434 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3435 				chan->remote_id = efs.id;
3436 				chan->remote_stype = efs.stype;
3437 				chan->remote_msdu = le16_to_cpu(efs.msdu);
3438 				chan->remote_flush_to =
3439 					le32_to_cpu(efs.flush_to);
3440 				chan->remote_acc_lat =
3441 					le32_to_cpu(efs.acc_lat);
3442 				chan->remote_sdu_itime =
3443 					le32_to_cpu(efs.sdu_itime);
3444 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3445 						   sizeof(efs),
3446 						   (unsigned long) &efs);
3447 			}
3448 			break;
3449 
3450 		case L2CAP_MODE_STREAMING:
3451 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3452 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3453 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3454 			rfc.max_pdu_size = cpu_to_le16(size);
3455 			chan->remote_mps = size;
3456 
3457 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3458 
3459 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3460 					   (unsigned long) &rfc);
3461 
3462 			break;
3463 
3464 		default:
3465 			result = L2CAP_CONF_UNACCEPT;
3466 
3467 			memset(&rfc, 0, sizeof(rfc));
3468 			rfc.mode = chan->mode;
3469 		}
3470 
3471 		if (result == L2CAP_CONF_SUCCESS)
3472 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3473 	}
3474 	rsp->scid   = cpu_to_le16(chan->dcid);
3475 	rsp->result = cpu_to_le16(result);
3476 	rsp->flags  = __constant_cpu_to_le16(0);
3477 
3478 	return ptr - data;
3479 }
3480 
3481 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3482 				void *data, u16 *result)
3483 {
3484 	struct l2cap_conf_req *req = data;
3485 	void *ptr = req->data;
3486 	int type, olen;
3487 	unsigned long val;
3488 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3489 	struct l2cap_conf_efs efs;
3490 
3491 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3492 
3493 	while (len >= L2CAP_CONF_OPT_SIZE) {
3494 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3495 
3496 		switch (type) {
3497 		case L2CAP_CONF_MTU:
3498 			if (val < L2CAP_DEFAULT_MIN_MTU) {
3499 				*result = L2CAP_CONF_UNACCEPT;
3500 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3501 			} else
3502 				chan->imtu = val;
3503 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3504 			break;
3505 
3506 		case L2CAP_CONF_FLUSH_TO:
3507 			chan->flush_to = val;
3508 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3509 					   2, chan->flush_to);
3510 			break;
3511 
3512 		case L2CAP_CONF_RFC:
3513 			if (olen == sizeof(rfc))
3514 				memcpy(&rfc, (void *)val, olen);
3515 
3516 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3517 			    rfc.mode != chan->mode)
3518 				return -ECONNREFUSED;
3519 
3520 			chan->fcs = 0;
3521 
3522 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3523 					   sizeof(rfc), (unsigned long) &rfc);
3524 			break;
3525 
3526 		case L2CAP_CONF_EWS:
3527 			chan->ack_win = min_t(u16, val, chan->ack_win);
3528 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3529 					   chan->tx_win);
3530 			break;
3531 
3532 		case L2CAP_CONF_EFS:
3533 			if (olen == sizeof(efs))
3534 				memcpy(&efs, (void *)val, olen);
3535 
3536 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3537 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3538 			    efs.stype != chan->local_stype)
3539 				return -ECONNREFUSED;
3540 
3541 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3542 					   (unsigned long) &efs);
3543 			break;
3544 
3545 		case L2CAP_CONF_FCS:
3546 			if (*result == L2CAP_CONF_PENDING)
3547 				if (val == L2CAP_FCS_NONE)
3548 					set_bit(CONF_RECV_NO_FCS,
3549 						&chan->conf_state);
3550 			break;
3551 		}
3552 	}
3553 
3554 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3555 		return -ECONNREFUSED;
3556 
3557 	chan->mode = rfc.mode;
3558 
3559 	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3560 		switch (rfc.mode) {
3561 		case L2CAP_MODE_ERTM:
3562 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3563 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3564 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3565 			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3566 				chan->ack_win = min_t(u16, chan->ack_win,
3567 						      rfc.txwin_size);
3568 
3569 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3570 				chan->local_msdu = le16_to_cpu(efs.msdu);
3571 				chan->local_sdu_itime =
3572 					le32_to_cpu(efs.sdu_itime);
3573 				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3574 				chan->local_flush_to =
3575 					le32_to_cpu(efs.flush_to);
3576 			}
3577 			break;
3578 
3579 		case L2CAP_MODE_STREAMING:
3580 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3581 		}
3582 	}
3583 
3584 	req->dcid   = cpu_to_le16(chan->dcid);
3585 	req->flags  = __constant_cpu_to_le16(0);
3586 
3587 	return ptr - data;
3588 }
3589 
3590 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3591 				u16 result, u16 flags)
3592 {
3593 	struct l2cap_conf_rsp *rsp = data;
3594 	void *ptr = rsp->data;
3595 
3596 	BT_DBG("chan %p", chan);
3597 
3598 	rsp->scid   = cpu_to_le16(chan->dcid);
3599 	rsp->result = cpu_to_le16(result);
3600 	rsp->flags  = cpu_to_le16(flags);
3601 
3602 	return ptr - data;
3603 }
3604 
3605 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3606 {
3607 	struct l2cap_conn_rsp rsp;
3608 	struct l2cap_conn *conn = chan->conn;
3609 	u8 buf[128];
3610 	u8 rsp_code;
3611 
3612 	rsp.scid   = cpu_to_le16(chan->dcid);
3613 	rsp.dcid   = cpu_to_le16(chan->scid);
3614 	rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3615 	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3616 
3617 	if (chan->hs_hcon)
3618 		rsp_code = L2CAP_CREATE_CHAN_RSP;
3619 	else
3620 		rsp_code = L2CAP_CONN_RSP;
3621 
3622 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3623 
3624 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3625 
3626 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3627 		return;
3628 
3629 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3630 		       l2cap_build_conf_req(chan, buf), buf);
3631 	chan->num_conf_req++;
3632 }
3633 
/* Apply mode-specific parameters from a successful Configure Response.
 *
 * Walks the remote-supplied option list (@rsp, @len bytes) looking for
 * the RFC and extended-window-size options and copies the negotiated
 * ERTM or streaming-mode values into @chan.  Channels in other modes
 * are left untouched.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	/* Only ERTM and streaming mode carry RFC parameters */
	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			/* Options of unexpected length keep the defaults */
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* With extended control the window comes from the EWS
		 * option rather than the RFC txwin_size field.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
	}
}
3684 
3685 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3686 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3687 				    u8 *data)
3688 {
3689 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3690 
3691 	if (cmd_len < sizeof(*rej))
3692 		return -EPROTO;
3693 
3694 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3695 		return 0;
3696 
3697 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3698 	    cmd->ident == conn->info_ident) {
3699 		cancel_delayed_work(&conn->info_timer);
3700 
3701 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3702 		conn->info_ident = 0;
3703 
3704 		l2cap_conn_start(conn);
3705 	}
3706 
3707 	return 0;
3708 }
3709 
/* Handle an incoming Connection Request (also used for the AMP Create
 * Channel Request path, selected via @rsp_code/@amp_id).
 *
 * Looks up a listening channel for the requested PSM, verifies link
 * security, allocates the new channel and always answers with
 * @rsp_code.  Returns the new channel, or NULL if the request was
 * refused.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	sk = chan->sk;

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
				/* Let the socket owner accept or reject */
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id) {
					__l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Wait for authentication to finish */
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange still in progress */
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Start the information exchange so the pending
		 * connection can proceed once the feature mask is known.
		 */
		struct l2cap_info_req info;
		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		/* Kick off configuration right away on success */
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
3838 
3839 static int l2cap_connect_req(struct l2cap_conn *conn,
3840 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3841 {
3842 	struct hci_dev *hdev = conn->hcon->hdev;
3843 	struct hci_conn *hcon = conn->hcon;
3844 
3845 	if (cmd_len < sizeof(struct l2cap_conn_req))
3846 		return -EPROTO;
3847 
3848 	hci_dev_lock(hdev);
3849 	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3850 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3851 		mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3852 				      hcon->dst_type, 0, NULL, 0,
3853 				      hcon->dev_class);
3854 	hci_dev_unlock(hdev);
3855 
3856 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3857 	return 0;
3858 }
3859 
3860 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3861 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3862 				    u8 *data)
3863 {
3864 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3865 	u16 scid, dcid, result, status;
3866 	struct l2cap_chan *chan;
3867 	u8 req[128];
3868 	int err;
3869 
3870 	if (cmd_len < sizeof(*rsp))
3871 		return -EPROTO;
3872 
3873 	scid   = __le16_to_cpu(rsp->scid);
3874 	dcid   = __le16_to_cpu(rsp->dcid);
3875 	result = __le16_to_cpu(rsp->result);
3876 	status = __le16_to_cpu(rsp->status);
3877 
3878 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3879 	       dcid, scid, result, status);
3880 
3881 	mutex_lock(&conn->chan_lock);
3882 
3883 	if (scid) {
3884 		chan = __l2cap_get_chan_by_scid(conn, scid);
3885 		if (!chan) {
3886 			err = -EFAULT;
3887 			goto unlock;
3888 		}
3889 	} else {
3890 		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3891 		if (!chan) {
3892 			err = -EFAULT;
3893 			goto unlock;
3894 		}
3895 	}
3896 
3897 	err = 0;
3898 
3899 	l2cap_chan_lock(chan);
3900 
3901 	switch (result) {
3902 	case L2CAP_CR_SUCCESS:
3903 		l2cap_state_change(chan, BT_CONFIG);
3904 		chan->ident = 0;
3905 		chan->dcid = dcid;
3906 		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3907 
3908 		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3909 			break;
3910 
3911 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3912 			       l2cap_build_conf_req(chan, req), req);
3913 		chan->num_conf_req++;
3914 		break;
3915 
3916 	case L2CAP_CR_PEND:
3917 		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3918 		break;
3919 
3920 	default:
3921 		l2cap_chan_del(chan, ECONNREFUSED);
3922 		break;
3923 	}
3924 
3925 	l2cap_chan_unlock(chan);
3926 
3927 unlock:
3928 	mutex_unlock(&conn->chan_lock);
3929 
3930 	return err;
3931 }
3932 
3933 static inline void set_default_fcs(struct l2cap_chan *chan)
3934 {
3935 	/* FCS is enabled only in ERTM or streaming mode, if one or both
3936 	 * sides request it.
3937 	 */
3938 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3939 		chan->fcs = L2CAP_FCS_NONE;
3940 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3941 		chan->fcs = L2CAP_FCS_CRC16;
3942 }
3943 
3944 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3945 				    u8 ident, u16 flags)
3946 {
3947 	struct l2cap_conn *conn = chan->conn;
3948 
3949 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3950 	       flags);
3951 
3952 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3953 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3954 
3955 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3956 		       l2cap_build_conf_rsp(chan, data,
3957 					    L2CAP_CONF_SUCCESS, flags), data);
3958 }
3959 
/* Handle an incoming Configure Request for one of our channels.
 *
 * Options may arrive split across several requests (continuation
 * flag); they are accumulated in chan->conf_req and only parsed once
 * the final fragment arrives.  When both directions are configured the
 * channel is brought up (or torn down on ERTM init failure).
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		/* Channel is not being configured: reject with invalid CID */
		struct l2cap_cmd_rej_cid rej;

		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions are configured: bring the channel up,
		 * or disconnect if ERTM initialization fails.
		 */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4072 
/* Handle an incoming Configure Response.
 *
 * SUCCESS applies the negotiated parameters; PENDING may trigger the
 * EFS confirmation path; UNACCEPT retries with a new Configure Request
 * until L2CAP_CONF_MAX_CONF_RSP is exceeded, after which (and for any
 * other result) the channel is disconnected.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	/* Option bytes following the response header */
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* Retries exhausted - fall through to disconnect */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4184 
/* Handle an incoming Disconnection Request: acknowledge it, shut the
 * socket down and remove the channel from the connection.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's destination CID is our source CID */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	/* Take a reference so the channel stays valid for ops->close()
	 * after l2cap_chan_del() has dropped it from the connection.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4235 
/* Handle an incoming Disconnection Response: tear down the channel we
 * previously asked to disconnect.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Take a reference so the channel stays valid for ops->close()
	 * after l2cap_chan_del() has dropped it from the connection.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4274 
4275 static inline int l2cap_information_req(struct l2cap_conn *conn,
4276 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4277 					u8 *data)
4278 {
4279 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4280 	u16 type;
4281 
4282 	if (cmd_len != sizeof(*req))
4283 		return -EPROTO;
4284 
4285 	type = __le16_to_cpu(req->type);
4286 
4287 	BT_DBG("type 0x%4.4x", type);
4288 
4289 	if (type == L2CAP_IT_FEAT_MASK) {
4290 		u8 buf[8];
4291 		u32 feat_mask = l2cap_feat_mask;
4292 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4293 		rsp->type   = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4294 		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4295 		if (!disable_ertm)
4296 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4297 				| L2CAP_FEAT_FCS;
4298 		if (enable_hs)
4299 			feat_mask |= L2CAP_FEAT_EXT_FLOW
4300 				| L2CAP_FEAT_EXT_WINDOW;
4301 
4302 		put_unaligned_le32(feat_mask, rsp->data);
4303 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4304 			       buf);
4305 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4306 		u8 buf[12];
4307 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4308 
4309 		if (enable_hs)
4310 			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4311 		else
4312 			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4313 
4314 		rsp->type   = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4315 		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4316 		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4317 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4318 			       buf);
4319 	} else {
4320 		struct l2cap_info_rsp rsp;
4321 		rsp.type   = cpu_to_le16(type);
4322 		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4323 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4324 			       &rsp);
4325 	}
4326 
4327 	return 0;
4328 }
4329 
/* Handle an Information Response during the connection-setup info
 * exchange.  After the feature mask arrives a fixed-channel query may
 * follow; once the exchange completes (or fails) pending channels are
 * started via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused the request: finish the exchange anyway */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Continue the exchange with a fixed-channel query */
			struct l2cap_info_req req;
			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4392 
/* Handle an AMP Create Channel Request.
 *
 * Controller id 0 (BR/EDR) is treated like a plain Connection Request;
 * otherwise the AMP controller id is validated and, on success, the
 * new BR/EDR channel is associated with the high-speed link.  Invalid
 * controller ids are answered with L2CAP_CR_BAD_AMP.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!enable_hs)
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == HCI_BREDR_ID) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		/* The AMP link to the peer must already exist */
		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, conn->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			return -EFAULT;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return -EFAULT;
}
4466 
4467 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4468 {
4469 	struct l2cap_move_chan_req req;
4470 	u8 ident;
4471 
4472 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4473 
4474 	ident = l2cap_get_ident(chan->conn);
4475 	chan->ident = ident;
4476 
4477 	req.icid = cpu_to_le16(chan->scid);
4478 	req.dest_amp_id = dest_amp_id;
4479 
4480 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4481 		       &req);
4482 
4483 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4484 }
4485 
4486 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4487 {
4488 	struct l2cap_move_chan_rsp rsp;
4489 
4490 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4491 
4492 	rsp.icid = cpu_to_le16(chan->dcid);
4493 	rsp.result = cpu_to_le16(result);
4494 
4495 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4496 		       sizeof(rsp), &rsp);
4497 }
4498 
4499 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4500 {
4501 	struct l2cap_move_chan_cfm cfm;
4502 
4503 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4504 
4505 	chan->ident = l2cap_get_ident(chan->conn);
4506 
4507 	cfm.icid = cpu_to_le16(chan->scid);
4508 	cfm.result = cpu_to_le16(result);
4509 
4510 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4511 		       sizeof(cfm), &cfm);
4512 
4513 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4514 }
4515 
4516 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4517 {
4518 	struct l2cap_move_chan_cfm cfm;
4519 
4520 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4521 
4522 	cfm.icid = cpu_to_le16(icid);
4523 	cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4524 
4525 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4526 		       sizeof(cfm), &cfm);
4527 }
4528 
/* Acknowledge a received Move Channel Confirm by echoing the icid back
 * in a Move Channel Confirm Response, using the confirm's ident.
 */
static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
					 u16 icid)
{
	struct l2cap_move_chan_cfm_rsp rsp;

	BT_DBG("icid 0x%4.4x", icid);

	rsp.icid = cpu_to_le16(icid);
	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
}
4539 
/* Drop this channel's references to the high-speed logical link.
 * Actual teardown of the link itself is not implemented yet.
 */
static void __release_logical_link(struct l2cap_chan *chan)
{
	chan->hs_hchan = NULL;
	chan->hs_hcon = NULL;

	/* Placeholder - release the logical link */
}
4547 
/* Handle failure to bring up the AMP logical link for this channel.
 * A channel still being created is torn down; an established channel
 * aborts its pending move according to the move role this side holds.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		/* Reject the move; clean up local move state first */
		l2cap_move_done(chan);
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
4578 
/* Complete creation of a channel on an AMP controller once its logical
 * link is up: attach the hci_chan, send the config response that was
 * deferred until now and, if configuration already finished, initialize
 * ERTM and bring the channel up.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* Config response was held back until the logical link completed */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
4601 
/* Advance the channel-move state machine after the logical link for a
 * move came up, based on which move state the channel was waiting in.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			/* Defer finishing the move until local busy clears */
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4635 
/* Logical link confirmation for an AMP channel create or move.  On
 * error both paths are failed and the link reference dropped; on
 * success dispatch to the create or move completion handler based on
 * the channel state.
 *
 * Call with chan locked
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
4656 
/* Begin moving an established channel to the other controller type.
 * From BR/EDR the move proceeds only if the channel policy prefers AMP
 * and starts with physical link setup; from an AMP the channel is moved
 * straight back to BR/EDR (controller id 0).
 */
void l2cap_move_start(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	if (chan->local_amp_id == HCI_BREDR_ID) {
		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
			return;
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - start physical link setup */
	} else {
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		/* Destination id 0 means back to BR/EDR */
		chan->move_id = 0;
		l2cap_move_setup(chan);
		l2cap_send_move_chan_req(chan, 0);
	}
}
4675 
/* Handle physical link completion for a channel being created on an
 * AMP controller.  An outgoing channel (BT_CONNECT) proceeds with a
 * Create Channel Request or falls back to a BR/EDR Connect Request; an
 * incoming channel answers the pending Create Channel Request and, on
 * success, enters configuration.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* FCS is not used on AMP links */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Kick off configuration on the new channel */
			__l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}
4727 
/* Initiator-side continuation of a move after the physical link is
 * ready: record the target controller id and send the Move Channel
 * Request to the remote side.
 */
static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
				   u8 remote_amp_id)
{
	l2cap_move_setup(chan);
	chan->move_id = local_amp_id;
	chan->move_state = L2CAP_MOVE_WAIT_RSP;

	l2cap_send_move_chan_req(chan, remote_amp_id);
}
4737 
/* Responder-side continuation of a move after physical link setup.
 * If the logical link is already connected the move is accepted right
 * away; if it is still coming up, wait for its confirmation.  With no
 * logical link available the move is rejected.  (The hchan lookup is
 * still a placeholder, so currently only the reject path is taken.)
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
4762 
4763 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4764 {
4765 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4766 		u8 rsp_result;
4767 		if (result == -EINVAL)
4768 			rsp_result = L2CAP_MR_BAD_ID;
4769 		else
4770 			rsp_result = L2CAP_MR_NOT_ALLOWED;
4771 
4772 		l2cap_send_move_chan_rsp(chan, rsp_result);
4773 	}
4774 
4775 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
4776 	chan->move_state = L2CAP_MOVE_STABLE;
4777 
4778 	/* Restart data transmission */
4779 	l2cap_ertm_send(chan);
4780 }
4781 
/* Invoke with locked chan
 *
 * Physical link setup completed for an AMP create or move.  Dispatch to
 * channel creation (channel not yet connected), or to the initiator /
 * responder move handlers; a non-success result cancels the move.
 *
 * NOTE(review): the BT_DISCONN/BT_CLOSED path unlocks the channel
 * before returning while the other paths leave it locked - confirm
 * that callers expect this asymmetry.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
4815 
/* Handle an incoming Move Channel Request.  Validates the channel and
 * the destination controller, detects move collisions, then either
 * accepts the move (possibly pending on local busy or physical link
 * setup) or rejects it with an appropriate result code.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!enable_hs)
		return -EINVAL;

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* No such channel; reject using the raw icid */
		rsp.icid = cpu_to_le16(icid);
		rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	/* Only dynamic ERTM/streaming channels without a BR/EDR-only
	 * policy may be moved
	 */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	if (req->dest_amp_id) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision.  Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(conn->src, conn->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (!req->dest_amp_id) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
4913 
/* Process a success or pending Move Channel Response as initiator,
 * advancing the move state machine.  An unknown icid is confirmed as
 * unconfirmed per the spec; any unexpected move state aborts the move.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	/* A pending result restarts the (extended) move timer */
	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
5003 
/* Process a failed Move Channel Response.  On a collision result the
 * initiator demotes itself to responder; otherwise the move is
 * cancelled.  In all cases an unconfirmed Move Channel Confirm is
 * sent, as required even when the channel cannot be located.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			/* Lost the collision; remote becomes initiator */
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
5032 
5033 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5034 				  struct l2cap_cmd_hdr *cmd,
5035 				  u16 cmd_len, void *data)
5036 {
5037 	struct l2cap_move_chan_rsp *rsp = data;
5038 	u16 icid, result;
5039 
5040 	if (cmd_len != sizeof(*rsp))
5041 		return -EPROTO;
5042 
5043 	icid = le16_to_cpu(rsp->icid);
5044 	result = le16_to_cpu(rsp->result);
5045 
5046 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5047 
5048 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5049 		l2cap_move_continue(conn, icid, result);
5050 	else
5051 		l2cap_move_fail(conn, cmd->ident, icid, result);
5052 
5053 	return 0;
5054 }
5055 
/* Handle an incoming Move Channel Confirm.  Commits or reverts the
 * controller switch on the channel and always answers with a Move
 * Channel Confirm Response, even for an unknown icid.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			chan->local_amp_id = chan->move_id;
			/* Moved back to BR/EDR: the AMP link is unused now */
			if (!chan->local_amp_id)
				__release_logical_link(chan);
		} else {
			/* Unconfirmed: stay on the current controller */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
5097 
/* Handle an incoming Move Channel Confirm Response, completing the
 * move on the initiator side.  A BR/EDR destination releases the now
 * unused logical link.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		if (!chan->local_amp_id && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
5132 
5133 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5134 					 u16 to_multiplier)
5135 {
5136 	u16 max_latency;
5137 
5138 	if (min > max || min < 6 || max > 3200)
5139 		return -EINVAL;
5140 
5141 	if (to_multiplier < 10 || to_multiplier > 3200)
5142 		return -EINVAL;
5143 
5144 	if (max >= to_multiplier * 8)
5145 		return -EINVAL;
5146 
5147 	max_latency = (to_multiplier * 8 / max) - 1;
5148 	if (latency > 499 || latency > max_latency)
5149 		return -EINVAL;
5150 
5151 	return 0;
5152 }
5153 
/* Handle an LE Connection Parameter Update Request.  Only valid on the
 * master side; the parameters are validated, a response is sent, and
 * an accepted update is forwarded to the controller.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	/* Only the master can apply a parameter update */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min		= __le16_to_cpu(req->min);
	max		= __le16_to_cpu(req->max);
	latency		= __le16_to_cpu(req->latency);
	to_multiplier	= __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	/* Apply the accepted parameters on the LE link */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
5196 
/* Dispatch a single BR/EDR signaling command to its handler.  Echo
 * requests are answered inline; unknown opcodes return -EINVAL so the
 * caller can send a Command Reject.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		err = l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5276 
5277 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5278 				   struct l2cap_cmd_hdr *cmd, u8 *data)
5279 {
5280 	switch (cmd->code) {
5281 	case L2CAP_COMMAND_REJ:
5282 		return 0;
5283 
5284 	case L2CAP_CONN_PARAM_UPDATE_REQ:
5285 		return l2cap_conn_param_update_req(conn, cmd, data);
5286 
5287 	case L2CAP_CONN_PARAM_UPDATE_RSP:
5288 		return 0;
5289 
5290 	default:
5291 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5292 		return -EINVAL;
5293 	}
5294 }
5295 
/* Parse and dispatch all LE signaling commands contained in one
 * signaling-channel skb, replying with a Command Reject on handler
 * errors.  Consumes the skb.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw sockets a copy of the signaling traffic first */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		/* A length past the buffer or a zero ident means the
		 * PDU is malformed; stop parsing
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_le_sig_cmd(conn, &cmd, data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		data += cmd_len;
		len  -= cmd_len;
	}

	kfree_skb(skb);
}
5340 
/* Parse and dispatch all BR/EDR signaling commands contained in one
 * signaling-channel skb, replying with a Command Reject on handler
 * errors.  Consumes the skb.  (Structure mirrors l2cap_le_sig_channel.)
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw sockets a copy of the signaling traffic first */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		/* A length past the buffer or a zero ident means the
		 * PDU is malformed; stop parsing
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		data += cmd_len;
		len  -= cmd_len;
	}

	kfree_skb(skb);
}
5385 
/* Verify and strip the CRC16 FCS trailer of a received ERTM/streaming
 * frame.  Returns 0 on success (or when the channel does not use FCS),
 * -EBADMSG on checksum mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	/* The FCS also covers the already-pulled basic + control header */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim the FCS off skb->len; the two FCS bytes are still
		 * present in the buffer just past the new length
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
5406 
/* Answer a poll (P-bit) by sending a frame carrying the F-bit: an RNR
 * when locally busy, otherwise pending I-frames, or a plain RR if no
 * I-frame carried the F-bit.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote was busy; restart retransmissions for unacked frames */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5440 
/* Append new_frag to skb's frag_list, tracking the current tail in
 * *last_frag to avoid walking the list, and account its length into
 * the head skb's bookkeeping.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5459 
/* Reassemble a (possibly segmented) SDU from an incoming I-frame
 * according to its SAR bits and deliver complete SDUs upstream.
 * Ownership: on success the skb is consumed (delivered or held in
 * chan->sdu); on error both the skb and any partial SDU are freed.
 * Returns 0 on success, a negative errno on protocol violation,
 * oversize SDU, or upstream delivery failure.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	/* Default: any SAR state mismatch falls through as -EINVAL */
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* An unsegmented frame must not interrupt reassembly */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		/* A new SDU must not start while one is in progress */
		if (chan->sdu)
			break;

		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* First segment already holds the whole SDU: invalid */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb is now owned by chan->sdu; don't free it below */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Continuation must not complete or overflow the SDU */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Final length must match the announced SDU length */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* kfree_skb(NULL) is a no-op, so this is safe for both */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5541 
/* Re-segment queued outgoing data after a channel move changes the
 * MPS.  Not implemented yet; always reports success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
5547 
5548 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5549 {
5550 	u8 event;
5551 
5552 	if (chan->mode != L2CAP_MODE_ERTM)
5553 		return;
5554 
5555 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5556 	l2cap_tx(chan, NULL, NULL, event);
5557 }
5558 
/* Drain the SREJ queue: deliver in-sequence I-frames upstream until a
 * sequence gap, a reassembly error, or local busy stops progress.
 * When the queue empties, leave SREJ recovery and acknowledge.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		/* All missing frames recovered; back to normal receive */
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5592 
/* Handle a received SREJ S-frame: retransmit the single requested
 * I-frame, enforcing the retry limit and the spec's rules for P/F-bit
 * interaction to avoid duplicate retransmissions.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* An SREJ for the next unused sequence number is a protocol error */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		/* Retransmission must carry the F-bit in reply to the poll */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this SREJ was already
			 * answered while waiting for the F-bit
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5650 
/* Process a received REJ S-frame: the peer rejects all frames from
 * control->reqseq onward and requests their retransmission.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A REJ of the next, not-yet-sent frame is a protocol error */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Only retransmit if this F=1 frame was not already
		 * accounted for by a previous REJ (CONN_REJ_ACT set).
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5687 
/* Classify the txseq of a received I-frame relative to the receive
 * window and the current recovery state, so the RX state machines can
 * decide whether to accept, buffer, drop or disconnect.
 *
 * Returns one of the L2CAP_TXSEQ_* classification codes.  All
 * comparisons use __seq_offset(), i.e. distances modulo the sequence
 * number space.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	/* Extra checks while SREJ recovery is in progress */
	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq earlier than expected_tx_seq: already received */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
5773 
5774 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
5775 			       struct l2cap_ctrl *control,
5776 			       struct sk_buff *skb, u8 event)
5777 {
5778 	int err = 0;
5779 	bool skb_in_use = 0;
5780 
5781 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5782 	       event);
5783 
5784 	switch (event) {
5785 	case L2CAP_EV_RECV_IFRAME:
5786 		switch (l2cap_classify_txseq(chan, control->txseq)) {
5787 		case L2CAP_TXSEQ_EXPECTED:
5788 			l2cap_pass_to_tx(chan, control);
5789 
5790 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5791 				BT_DBG("Busy, discarding expected seq %d",
5792 				       control->txseq);
5793 				break;
5794 			}
5795 
5796 			chan->expected_tx_seq = __next_seq(chan,
5797 							   control->txseq);
5798 
5799 			chan->buffer_seq = chan->expected_tx_seq;
5800 			skb_in_use = 1;
5801 
5802 			err = l2cap_reassemble_sdu(chan, skb, control);
5803 			if (err)
5804 				break;
5805 
5806 			if (control->final) {
5807 				if (!test_and_clear_bit(CONN_REJ_ACT,
5808 							&chan->conn_state)) {
5809 					control->final = 0;
5810 					l2cap_retransmit_all(chan, control);
5811 					l2cap_ertm_send(chan);
5812 				}
5813 			}
5814 
5815 			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
5816 				l2cap_send_ack(chan);
5817 			break;
5818 		case L2CAP_TXSEQ_UNEXPECTED:
5819 			l2cap_pass_to_tx(chan, control);
5820 
5821 			/* Can't issue SREJ frames in the local busy state.
5822 			 * Drop this frame, it will be seen as missing
5823 			 * when local busy is exited.
5824 			 */
5825 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5826 				BT_DBG("Busy, discarding unexpected seq %d",
5827 				       control->txseq);
5828 				break;
5829 			}
5830 
5831 			/* There was a gap in the sequence, so an SREJ
5832 			 * must be sent for each missing frame.  The
5833 			 * current frame is stored for later use.
5834 			 */
5835 			skb_queue_tail(&chan->srej_q, skb);
5836 			skb_in_use = 1;
5837 			BT_DBG("Queued %p (queue len %d)", skb,
5838 			       skb_queue_len(&chan->srej_q));
5839 
5840 			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
5841 			l2cap_seq_list_clear(&chan->srej_list);
5842 			l2cap_send_srej(chan, control->txseq);
5843 
5844 			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
5845 			break;
5846 		case L2CAP_TXSEQ_DUPLICATE:
5847 			l2cap_pass_to_tx(chan, control);
5848 			break;
5849 		case L2CAP_TXSEQ_INVALID_IGNORE:
5850 			break;
5851 		case L2CAP_TXSEQ_INVALID:
5852 		default:
5853 			l2cap_send_disconn_req(chan, ECONNRESET);
5854 			break;
5855 		}
5856 		break;
5857 	case L2CAP_EV_RECV_RR:
5858 		l2cap_pass_to_tx(chan, control);
5859 		if (control->final) {
5860 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5861 
5862 			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
5863 			    !__chan_is_moving(chan)) {
5864 				control->final = 0;
5865 				l2cap_retransmit_all(chan, control);
5866 			}
5867 
5868 			l2cap_ertm_send(chan);
5869 		} else if (control->poll) {
5870 			l2cap_send_i_or_rr_or_rnr(chan);
5871 		} else {
5872 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
5873 					       &chan->conn_state) &&
5874 			    chan->unacked_frames)
5875 				__set_retrans_timer(chan);
5876 
5877 			l2cap_ertm_send(chan);
5878 		}
5879 		break;
5880 	case L2CAP_EV_RECV_RNR:
5881 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5882 		l2cap_pass_to_tx(chan, control);
5883 		if (control && control->poll) {
5884 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
5885 			l2cap_send_rr_or_rnr(chan, 0);
5886 		}
5887 		__clear_retrans_timer(chan);
5888 		l2cap_seq_list_clear(&chan->retrans_list);
5889 		break;
5890 	case L2CAP_EV_RECV_REJ:
5891 		l2cap_handle_rej(chan, control);
5892 		break;
5893 	case L2CAP_EV_RECV_SREJ:
5894 		l2cap_handle_srej(chan, control);
5895 		break;
5896 	default:
5897 		break;
5898 	}
5899 
5900 	if (skb && !skb_in_use) {
5901 		BT_DBG("Freeing %p", skb);
5902 		kfree_skb(skb);
5903 	}
5904 
5905 	return err;
5906 }
5907 
5908 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
5909 				    struct l2cap_ctrl *control,
5910 				    struct sk_buff *skb, u8 event)
5911 {
5912 	int err = 0;
5913 	u16 txseq = control->txseq;
5914 	bool skb_in_use = 0;
5915 
5916 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5917 	       event);
5918 
5919 	switch (event) {
5920 	case L2CAP_EV_RECV_IFRAME:
5921 		switch (l2cap_classify_txseq(chan, txseq)) {
5922 		case L2CAP_TXSEQ_EXPECTED:
5923 			/* Keep frame for reassembly later */
5924 			l2cap_pass_to_tx(chan, control);
5925 			skb_queue_tail(&chan->srej_q, skb);
5926 			skb_in_use = 1;
5927 			BT_DBG("Queued %p (queue len %d)", skb,
5928 			       skb_queue_len(&chan->srej_q));
5929 
5930 			chan->expected_tx_seq = __next_seq(chan, txseq);
5931 			break;
5932 		case L2CAP_TXSEQ_EXPECTED_SREJ:
5933 			l2cap_seq_list_pop(&chan->srej_list);
5934 
5935 			l2cap_pass_to_tx(chan, control);
5936 			skb_queue_tail(&chan->srej_q, skb);
5937 			skb_in_use = 1;
5938 			BT_DBG("Queued %p (queue len %d)", skb,
5939 			       skb_queue_len(&chan->srej_q));
5940 
5941 			err = l2cap_rx_queued_iframes(chan);
5942 			if (err)
5943 				break;
5944 
5945 			break;
5946 		case L2CAP_TXSEQ_UNEXPECTED:
5947 			/* Got a frame that can't be reassembled yet.
5948 			 * Save it for later, and send SREJs to cover
5949 			 * the missing frames.
5950 			 */
5951 			skb_queue_tail(&chan->srej_q, skb);
5952 			skb_in_use = 1;
5953 			BT_DBG("Queued %p (queue len %d)", skb,
5954 			       skb_queue_len(&chan->srej_q));
5955 
5956 			l2cap_pass_to_tx(chan, control);
5957 			l2cap_send_srej(chan, control->txseq);
5958 			break;
5959 		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
5960 			/* This frame was requested with an SREJ, but
5961 			 * some expected retransmitted frames are
5962 			 * missing.  Request retransmission of missing
5963 			 * SREJ'd frames.
5964 			 */
5965 			skb_queue_tail(&chan->srej_q, skb);
5966 			skb_in_use = 1;
5967 			BT_DBG("Queued %p (queue len %d)", skb,
5968 			       skb_queue_len(&chan->srej_q));
5969 
5970 			l2cap_pass_to_tx(chan, control);
5971 			l2cap_send_srej_list(chan, control->txseq);
5972 			break;
5973 		case L2CAP_TXSEQ_DUPLICATE_SREJ:
5974 			/* We've already queued this frame.  Drop this copy. */
5975 			l2cap_pass_to_tx(chan, control);
5976 			break;
5977 		case L2CAP_TXSEQ_DUPLICATE:
5978 			/* Expecting a later sequence number, so this frame
5979 			 * was already received.  Ignore it completely.
5980 			 */
5981 			break;
5982 		case L2CAP_TXSEQ_INVALID_IGNORE:
5983 			break;
5984 		case L2CAP_TXSEQ_INVALID:
5985 		default:
5986 			l2cap_send_disconn_req(chan, ECONNRESET);
5987 			break;
5988 		}
5989 		break;
5990 	case L2CAP_EV_RECV_RR:
5991 		l2cap_pass_to_tx(chan, control);
5992 		if (control->final) {
5993 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5994 
5995 			if (!test_and_clear_bit(CONN_REJ_ACT,
5996 						&chan->conn_state)) {
5997 				control->final = 0;
5998 				l2cap_retransmit_all(chan, control);
5999 			}
6000 
6001 			l2cap_ertm_send(chan);
6002 		} else if (control->poll) {
6003 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6004 					       &chan->conn_state) &&
6005 			    chan->unacked_frames) {
6006 				__set_retrans_timer(chan);
6007 			}
6008 
6009 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
6010 			l2cap_send_srej_tail(chan);
6011 		} else {
6012 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6013 					       &chan->conn_state) &&
6014 			    chan->unacked_frames)
6015 				__set_retrans_timer(chan);
6016 
6017 			l2cap_send_ack(chan);
6018 		}
6019 		break;
6020 	case L2CAP_EV_RECV_RNR:
6021 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6022 		l2cap_pass_to_tx(chan, control);
6023 		if (control->poll) {
6024 			l2cap_send_srej_tail(chan);
6025 		} else {
6026 			struct l2cap_ctrl rr_control;
6027 			memset(&rr_control, 0, sizeof(rr_control));
6028 			rr_control.sframe = 1;
6029 			rr_control.super = L2CAP_SUPER_RR;
6030 			rr_control.reqseq = chan->buffer_seq;
6031 			l2cap_send_sframe(chan, &rr_control);
6032 		}
6033 
6034 		break;
6035 	case L2CAP_EV_RECV_REJ:
6036 		l2cap_handle_rej(chan, control);
6037 		break;
6038 	case L2CAP_EV_RECV_SREJ:
6039 		l2cap_handle_srej(chan, control);
6040 		break;
6041 	}
6042 
6043 	if (skb && !skb_in_use) {
6044 		BT_DBG("Freeing %p", skb);
6045 		kfree_skb(skb);
6046 	}
6047 
6048 	return err;
6049 }
6050 
6051 static int l2cap_finish_move(struct l2cap_chan *chan)
6052 {
6053 	BT_DBG("chan %p", chan);
6054 
6055 	chan->rx_state = L2CAP_RX_STATE_RECV;
6056 
6057 	if (chan->hs_hcon)
6058 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6059 	else
6060 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6061 
6062 	return l2cap_resegment(chan);
6063 }
6064 
/* RX handler for the WAIT_P state: after a channel move we wait for a
 * frame with the P-bit (poll) set before resuming normal operation
 * (see l2cap_finish_move()).  Any frame without P set is a protocol
 * error.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	/* Restart transmission from the head of the tx queue */
	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	/* Answer the poll with an F-bit frame */
	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* An I-frame is not an acceptable poll in this state */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	/* Now back in RECV state; let its handler finish the event */
	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6102 
/* RX handler for the WAIT_F state: after a channel move we wait for a
 * frame with the F-bit (final) set before resuming normal operation.
 * Any frame without F set is a protocol error.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	/* Restart transmission from the head of the tx queue */
	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* Adopt the MTU of the controller now carrying the channel */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	/* Process the frame itself under the normal RECV rules */
	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6140 
6141 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6142 {
6143 	/* Make sure reqseq is for a packet that has been sent but not acked */
6144 	u16 unacked;
6145 
6146 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6147 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6148 }
6149 
6150 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6151 		    struct sk_buff *skb, u8 event)
6152 {
6153 	int err = 0;
6154 
6155 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6156 	       control, skb, event, chan->rx_state);
6157 
6158 	if (__valid_reqseq(chan, control->reqseq)) {
6159 		switch (chan->rx_state) {
6160 		case L2CAP_RX_STATE_RECV:
6161 			err = l2cap_rx_state_recv(chan, control, skb, event);
6162 			break;
6163 		case L2CAP_RX_STATE_SREJ_SENT:
6164 			err = l2cap_rx_state_srej_sent(chan, control, skb,
6165 						       event);
6166 			break;
6167 		case L2CAP_RX_STATE_WAIT_P:
6168 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
6169 			break;
6170 		case L2CAP_RX_STATE_WAIT_F:
6171 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
6172 			break;
6173 		default:
6174 			/* shut it down */
6175 			break;
6176 		}
6177 	} else {
6178 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6179 		       control->reqseq, chan->next_tx_seq,
6180 		       chan->expected_ack_seq);
6181 		l2cap_send_disconn_req(chan, ECONNRESET);
6182 	}
6183 
6184 	return err;
6185 }
6186 
6187 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6188 			   struct sk_buff *skb)
6189 {
6190 	int err = 0;
6191 
6192 	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6193 	       chan->rx_state);
6194 
6195 	if (l2cap_classify_txseq(chan, control->txseq) ==
6196 	    L2CAP_TXSEQ_EXPECTED) {
6197 		l2cap_pass_to_tx(chan, control);
6198 
6199 		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6200 		       __next_seq(chan, chan->buffer_seq));
6201 
6202 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6203 
6204 		l2cap_reassemble_sdu(chan, skb, control);
6205 	} else {
6206 		if (chan->sdu) {
6207 			kfree_skb(chan->sdu);
6208 			chan->sdu = NULL;
6209 		}
6210 		chan->sdu_last_frag = NULL;
6211 		chan->sdu_len = 0;
6212 
6213 		if (skb) {
6214 			BT_DBG("Freeing %p", skb);
6215 			kfree_skb(skb);
6216 		}
6217 	}
6218 
6219 	chan->last_acked_seq = control->txseq;
6220 	chan->expected_tx_seq = __next_seq(chan, control->txseq);
6221 
6222 	return err;
6223 }
6224 
/* Entry point for ERTM/streaming-mode PDUs on a connected channel.
 * Validates FCS, payload length and the F/P bits, then feeds the
 * frame to l2cap_rx() (ERTM) or l2cap_stream_rx() (streaming).
 * Always consumes skb and always returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Exclude the SDU length field of a start fragment and the
	 * trailing FCS from the payload length check below.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	/* Payload larger than the negotiated MPS is a protocol error */
	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map control->super (RR/REJ/RNR/SREJ) to an RX event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6312 
/* Dispatch an L2CAP data frame to the channel identified by cid.
 * Consumes skb.  The channel is held locked across processing: either
 * by l2cap_get_chan_by_scid() or by the explicit lock on the A2MP
 * path, and released through "done".
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		/* recv() returning 0 means it took ownership of skb */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() always consumes the skb */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
6371 
6372 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6373 				  struct sk_buff *skb)
6374 {
6375 	struct l2cap_chan *chan;
6376 
6377 	chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
6378 	if (!chan)
6379 		goto drop;
6380 
6381 	BT_DBG("chan %p, len %d", chan, skb->len);
6382 
6383 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6384 		goto drop;
6385 
6386 	if (chan->imtu < skb->len)
6387 		goto drop;
6388 
6389 	if (!chan->ops->recv(chan, skb))
6390 		return;
6391 
6392 drop:
6393 	kfree_skb(skb);
6394 }
6395 
6396 static void l2cap_att_channel(struct l2cap_conn *conn,
6397 			      struct sk_buff *skb)
6398 {
6399 	struct l2cap_chan *chan;
6400 
6401 	chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
6402 					 conn->src, conn->dst);
6403 	if (!chan)
6404 		goto drop;
6405 
6406 	BT_DBG("chan %p, len %d", chan, skb->len);
6407 
6408 	if (chan->imtu < skb->len)
6409 		goto drop;
6410 
6411 	if (!chan->ops->recv(chan, skb))
6412 		return;
6413 
6414 drop:
6415 	kfree_skb(skb);
6416 }
6417 
/* Demultiplex a complete L2CAP frame to the proper channel handler.
 * Consumes skb (directly or via the called handler).
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	/* lh points at the basic header before it is pulled; skb_pull()
	 * only advances skb->data, so the header stays readable.
	 */
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the remaining payload exactly */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless frames carry a PSM before the payload */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_ATT:
		l2cap_att_channel(conn, skb);
		break;

	case L2CAP_CID_SMP:
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6463 
6464 /* ---- L2CAP interface with lower layer (HCI) ---- */
6465 
6466 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
6467 {
6468 	int exact = 0, lm1 = 0, lm2 = 0;
6469 	struct l2cap_chan *c;
6470 
6471 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
6472 
6473 	/* Find listening sockets and check their link_mode */
6474 	read_lock(&chan_list_lock);
6475 	list_for_each_entry(c, &chan_list, global_l) {
6476 		struct sock *sk = c->sk;
6477 
6478 		if (c->state != BT_LISTEN)
6479 			continue;
6480 
6481 		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
6482 			lm1 |= HCI_LM_ACCEPT;
6483 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6484 				lm1 |= HCI_LM_MASTER;
6485 			exact++;
6486 		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
6487 			lm2 |= HCI_LM_ACCEPT;
6488 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6489 				lm2 |= HCI_LM_MASTER;
6490 		}
6491 	}
6492 	read_unlock(&chan_list_lock);
6493 
6494 	return exact ? lm1 : lm2;
6495 }
6496 
6497 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6498 {
6499 	struct l2cap_conn *conn;
6500 
6501 	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6502 
6503 	if (!status) {
6504 		conn = l2cap_conn_add(hcon);
6505 		if (conn)
6506 			l2cap_conn_ready(conn);
6507 	} else {
6508 		l2cap_conn_del(hcon, bt_to_errno(status));
6509 	}
6510 }
6511 
6512 int l2cap_disconn_ind(struct hci_conn *hcon)
6513 {
6514 	struct l2cap_conn *conn = hcon->l2cap_data;
6515 
6516 	BT_DBG("hcon %p", hcon);
6517 
6518 	if (!conn)
6519 		return HCI_ERROR_REMOTE_USER_TERM;
6520 	return conn->disc_reason;
6521 }
6522 
/* HCI callback: the ACL link was disconnected; tear down the L2CAP
 * connection, translating the HCI reason to an errno.
 */
void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
6529 
6530 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6531 {
6532 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6533 		return;
6534 
6535 	if (encrypt == 0x00) {
6536 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
6537 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6538 		} else if (chan->sec_level == BT_SECURITY_HIGH)
6539 			l2cap_chan_close(chan, ECONNREFUSED);
6540 	} else {
6541 		if (chan->sec_level == BT_SECURITY_MEDIUM)
6542 			__clear_chan_timer(chan);
6543 	}
6544 }
6545 
/* HCI callback: the authentication/encryption state of hcon changed
 * (status 0 == success; encrypt != 0 == link now encrypted).  Walk
 * every channel on the connection and advance its state machine
 * accordingly.  Always returns 0.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	/* On LE the security timer guards SMP; key distribution starts
	 * once the link is successfully encrypted.
	 */
	if (hcon->type == LE_LINK) {
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP fixed channels have no security handling here */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* ATT channels become ready once the link is encrypted */
		if (chan->scid == L2CAP_CID_ATT) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* Skip channels with a connect request still pending */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Already-established channels: wake the socket and
		 * re-check encryption requirements.
		 */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			struct sock *sk = chan->sk;

			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Security done: proceed with (or abort) the
			 * outgoing connection.
			 */
			if (!status) {
				l2cap_start_connection(chan);
			} else {
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connection waiting on security:
			 * send the deferred connect response now.
			 */
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Kick off configuration for accepted channels */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
6661 
/* HCI callback: an ACL data packet arrived for hcon.  Reassembles
 * fragmented L2CAP frames (conn->rx_skb / conn->rx_len track the
 * partial frame) and hands complete frames to l2cap_recv_frame().
 * Always consumes skb and returns 0.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start frame while reassembly is in progress means
		 * the previous frame was truncated; discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
		break;
	}

drop:
	/* The incoming skb's payload was either handed off above
	 * (which returned early) or copied; free it in all cases.
	 */
	kfree_skb(skb);
	return 0;
}
6762 
6763 static int l2cap_debugfs_show(struct seq_file *f, void *p)
6764 {
6765 	struct l2cap_chan *c;
6766 
6767 	read_lock(&chan_list_lock);
6768 
6769 	list_for_each_entry(c, &chan_list, global_l) {
6770 		struct sock *sk = c->sk;
6771 
6772 		seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6773 			   &bt_sk(sk)->src, &bt_sk(sk)->dst,
6774 			   c->state, __le16_to_cpu(c->psm),
6775 			   c->scid, c->dcid, c->imtu, c->omtu,
6776 			   c->sec_level, c->mode);
6777 	}
6778 
6779 	read_unlock(&chan_list_lock);
6780 
6781 	return 0;
6782 }
6783 
/* debugfs open hook: wire l2cap_debugfs_show() up as a single-shot
 * seq_file so reads of the "l2cap" entry dump the channel list. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
6788 
/* File operations for the read-only "l2cap" debugfs entry; the seq_file
 * helpers (seq_read/seq_lseek/single_release) pair with single_open()
 * in l2cap_debugfs_open(). */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the "l2cap" debugfs file; created in l2cap_init() and
 * removed in l2cap_exit(). NULL if debugfs creation failed or was
 * skipped. */
static struct dentry *l2cap_debugfs;
6797 
6798 int __init l2cap_init(void)
6799 {
6800 	int err;
6801 
6802 	err = l2cap_init_sockets();
6803 	if (err < 0)
6804 		return err;
6805 
6806 	if (bt_debugfs) {
6807 		l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6808 						    NULL, &l2cap_debugfs_fops);
6809 		if (!l2cap_debugfs)
6810 			BT_ERR("Failed to create L2CAP debug file");
6811 	}
6812 
6813 	return 0;
6814 }
6815 
/* Tear down the L2CAP layer: remove the debugfs entry (debugfs_remove()
 * is a no-op on NULL, so this is safe even if creation failed) and
 * unregister the L2CAP socket family. */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
6821 
/* Module parameter (mode 0644: root-writable via sysfs) that disables
 * Enhanced Retransmission Mode; defined as `bool disable_ertm` near the
 * top of this file. */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
6824