xref: /linux/net/bluetooth/l2cap_core.c (revision d96caf61ace778b56ab189caaf2b2294101878a9)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
41 #include <net/bluetooth/amp.h>
42 
/* When set, ERTM and streaming modes are not offered or accepted
 * (see l2cap_mode_supported).  NOTE(review): presumably exposed as a
 * module parameter elsewhere in this file — confirm.
 */
bool disable_ertm;

/* Locally supported L2CAP feature mask (see l2cap_mode_supported) */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Fixed channel bitmap; only the signalling channel is claimed here.
 * NOTE(review): use in the information exchange is outside this chunk.
 */
static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };

/* Global list of all L2CAP channels, protected by chan_list_lock */
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

/* Forward declarations for helpers defined later in this file */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				       u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event);
61 /* ---- L2CAP channels ---- */
62 
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
64 						   u16 cid)
65 {
66 	struct l2cap_chan *c;
67 
68 	list_for_each_entry(c, &conn->chan_l, list) {
69 		if (c->dcid == cid)
70 			return c;
71 	}
72 	return NULL;
73 }
74 
75 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
76 						   u16 cid)
77 {
78 	struct l2cap_chan *c;
79 
80 	list_for_each_entry(c, &conn->chan_l, list) {
81 		if (c->scid == cid)
82 			return c;
83 	}
84 	return NULL;
85 }
86 
/* Find channel with given SCID.
 * Returns the channel locked via l2cap_chan_lock(); caller must unlock.
 * NOTE(review): no reference is taken on the returned channel; confirm
 * callers cannot race the final l2cap_chan_put() after chan_lock is
 * dropped. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	/* chan_lock keeps the list stable while we search and take the
	 * per-channel lock.
	 */
	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
102 
/* Find channel with given DCID.
 * Returns the channel locked via l2cap_chan_lock(); caller must unlock.
 * NOTE(review): as with l2cap_get_chan_by_scid, no reference is taken
 * on the returned channel — confirm callers' lifetime guarantees.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
119 
120 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
121 						    u8 ident)
122 {
123 	struct l2cap_chan *c;
124 
125 	list_for_each_entry(c, &conn->chan_l, list) {
126 		if (c->ident == ident)
127 			return c;
128 	}
129 	return NULL;
130 }
131 
/* Find the channel waiting on signalling ident @ident.
 * Returns the channel locked via l2cap_chan_lock(); caller must unlock.
 * NOTE(review): no reference is taken on the returned channel — same
 * caveat as l2cap_get_chan_by_scid.
 */
static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						  u8 ident)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
145 
146 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
147 {
148 	struct l2cap_chan *c;
149 
150 	list_for_each_entry(c, &chan_list, global_l) {
151 		if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
152 			return c;
153 	}
154 	return NULL;
155 }
156 
/* Bind @chan to @psm on local address @src, or pick a free dynamic PSM
 * when @psm is 0.  Returns 0 on success, -EADDRINUSE when the requested
 * PSM is already bound on @src, or -EINVAL when no dynamic PSM is free.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		err = -EINVAL;
		/* Scan the dynamic range 0x1001-0x10ff; step by 2 keeps
		 * the least significant octet odd, as valid PSMs require.
		 */
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm   = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
189 
/* Assign a fixed source CID to @chan.  Always succeeds (returns 0).
 * NOTE(review): taking chan_list_lock to write a per-channel field
 * looks unnecessary — presumably mirrors l2cap_add_psm; confirm.
 */
int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
{
	write_lock(&chan_list_lock);

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
200 
201 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
202 {
203 	u16 cid = L2CAP_CID_DYN_START;
204 
205 	for (; cid < L2CAP_CID_DYN_END; cid++) {
206 		if (!__l2cap_get_chan_by_scid(conn, cid))
207 			return cid;
208 	}
209 
210 	return 0;
211 }
212 
/* Update chan->state and notify the owner through the state_change
 * callback.  Lockless variant; see l2cap_state_change for the wrapper
 * that takes the socket lock first.
 */
static void __l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state);
}
221 
/* Change the channel state while holding the socket lock, serializing
 * against socket-layer accesses to the same state.
 */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_state_change(chan, state);
	release_sock(sk);
}
230 
/* Record @err on the channel's socket (sk_err).  Lockless variant; see
 * l2cap_chan_set_err for the socket-locked wrapper.
 */
static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	sk->sk_err = err;
}
237 
/* Record @err on the channel's socket while holding the socket lock. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;

	lock_sock(sk);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
246 
/* Arm the ERTM retransmission timer, but only while the monitor timer
 * is not running — once a poll is outstanding the monitor timer takes
 * over (see __set_monitor_timer).  A zero retrans_timeout leaves the
 * timer disarmed.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
255 
/* Switch from retransmission timing to monitor timing: the retrans
 * timer is always cancelled, and the monitor timer armed unless its
 * timeout is zero.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
264 
/* Linear search of @head for the skb whose ERTM control block carries
 * txseq @seq.  Returns NULL when no queued frame matches.
 */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->control.txseq == seq)
			return skb;
	}

	return NULL;
}
277 
278 /* ---- L2CAP sequence number lists ---- */
279 
280 /* For ERTM, ordered lists of sequence numbers must be tracked for
281  * SREJ requests that are received and for frames that are to be
282  * retransmitted. These seq_list functions implement a singly-linked
283  * list in an array, where membership in the list can also be checked
284  * in constant time. Items can also be added to the tail of the list
285  * and removed from the head in constant time, without further memory
286  * allocs or frees.
287  */
288 
/* Allocate and reset a sequence list able to hold @size entries.
 * Returns 0 on success or -ENOMEM.  The array is over-allocated to the
 * next power of two so (seq & mask) indexes it directly.
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	/* Every slot starts at the CLEAR sentinel: "not a member" */
	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
311 
/* Release the array allocated by l2cap_seq_list_init(); kfree(NULL) is
 * a no-op, so a zeroed/never-initialized list is harmless here.
 */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
316 
/* True when @seq is currently linked in the list (its slot holds either
 * a successor or the TAIL sentinel rather than CLEAR).
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
323 
/* Remove @seq from the list and return it, or L2CAP_SEQ_LIST_CLEAR when
 * the list is empty or @seq is not linked.  Removing the head is O(1);
 * removing an interior entry walks the chain to find its predecessor.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* Removing the only element empties the list entirely */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			/* Reached the tail without finding @seq */
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
357 
/* Remove and return the head of the list; returns L2CAP_SEQ_LIST_CLEAR
 * when the list is empty (head removal is the constant-time path of
 * l2cap_seq_list_remove).
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
363 
364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
365 {
366 	u16 i;
367 
368 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
369 		return;
370 
371 	for (i = 0; i <= seq_list->mask; i++)
372 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
373 
374 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
376 }
377 
/* Append @seq to the tail of the list in constant time.  Duplicate
 * appends are ignored: inserting an already-linked entry would corrupt
 * the singly linked chain.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		/* Empty list: the new entry is also the head */
		seq_list->head = seq;
	else
		/* Link the old tail to the new entry */
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
395 
/* Delayed work fired when the channel timer expires: close the channel
 * with a reason derived from its current state.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* Lock order: connection's channel-list mutex, then the channel */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	/* ops->close is invoked after dropping the channel lock;
	 * NOTE(review): presumably to avoid deadlock with the socket
	 * layer — confirm against the close callbacks.
	 */
	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	/* Balances the hold taken when the timer was armed —
	 * TODO confirm against l2cap_set_timer.
	 */
	l2cap_chan_put(chan);
}
425 
/* Allocate a new channel, link it into the global channel list, and
 * return it with one reference held (kref_init).  Returns NULL on
 * allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	/* NOTE(review): GFP_ATOMIC — presumably some callers run in
	 * atomic context; confirm before relaxing to GFP_KERNEL.
	 */
	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
453 
/* kref release callback, run by l2cap_chan_put() when the last
 * reference drops: unlink from the global channel list and free.
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
466 
/* Take a reference on @c; paired with l2cap_chan_put(). */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}
473 
/* Drop a reference on @c; frees the channel via l2cap_chan_destroy()
 * when this was the last one.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
480 
481 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
482 {
483 	chan->fcs  = L2CAP_FCS_CRC16;
484 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
485 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
486 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
487 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
488 	chan->sec_level = BT_SECURITY_LOW;
489 
490 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
491 }
492 
/* Attach @chan to @conn: assign source/destination CIDs and MTUs based
 * on the channel type, install best-effort flow-spec defaults, take the
 * channel and hci_conn references, and link the channel into the
 * connection's list.  NOTE(review): callers appear to hold
 * conn->chan_lock (see l2cap_chan_add) — confirm for all call sites.
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	/* Default disconnect reason, overwritten as the connection
	 * progresses.
	 */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_DEFAULT_MTU;
			/* The ATT fixed channel uses the same CID on
			 * both sides; anything else gets a dynamic CID.
			 */
			if (chan->dcid == L2CAP_CID_ATT)
				chan->scid = L2CAP_CID_ATT;
			else
				chan->scid = l2cap_alloc_cid(conn);
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_FIX_A2MP:
		chan->scid = L2CAP_CID_A2MP;
		chan->dcid = L2CAP_CID_A2MP;
		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Best-effort flow specification defaults */
	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* References dropped again in l2cap_chan_del() */
	l2cap_chan_hold(chan);

	hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
552 
/* Attach @chan to @conn under the connection's channel-list mutex. */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
559 
/* Detach @chan from its connection and tear down mode-specific state,
 * reporting @err to the owner through ops->teardown.  Undoes the
 * references taken in __l2cap_chan_add().  NOTE(review): callers
 * appear to hold conn->chan_lock and the channel lock — confirm.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drops the hold taken by __l2cap_chan_add() */
		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* A2MP fixed channels do not hold an hci_conn reference */
		if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	chan->ops->teardown(chan, err);

	/* Mode-specific state only exists once configuration completed */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch(chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);

		/* fall through */

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}

	return;
}
619 
/* Close @chan with @reason, following a state-dependent path: connected
 * ACL channels get a Disconnection Request first; a half-open incoming
 * channel (BT_CONNECT2) is refused with a Connection Response; all
 * other states are torn down directly.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
	       sk);

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			/* Give the peer sk_sndtimeo to answer the
			 * disconnect request before timing out.
			 */
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
		    conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Report "security blocked" when we were holding
			 * the connection for deferred setup, otherwise
			 * reject with "bad PSM".
			 */
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
676 
677 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
678 {
679 	if (chan->chan_type == L2CAP_CHAN_RAW) {
680 		switch (chan->sec_level) {
681 		case BT_SECURITY_HIGH:
682 			return HCI_AT_DEDICATED_BONDING_MITM;
683 		case BT_SECURITY_MEDIUM:
684 			return HCI_AT_DEDICATED_BONDING;
685 		default:
686 			return HCI_AT_NO_BONDING;
687 		}
688 	} else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
689 		if (chan->sec_level == BT_SECURITY_LOW)
690 			chan->sec_level = BT_SECURITY_SDP;
691 
692 		if (chan->sec_level == BT_SECURITY_HIGH)
693 			return HCI_AT_NO_BONDING_MITM;
694 		else
695 			return HCI_AT_NO_BONDING;
696 	} else {
697 		switch (chan->sec_level) {
698 		case BT_SECURITY_HIGH:
699 			return HCI_AT_GENERAL_BONDING_MITM;
700 		case BT_SECURITY_MEDIUM:
701 			return HCI_AT_GENERAL_BONDING;
702 		default:
703 			return HCI_AT_NO_BONDING;
704 		}
705 	}
706 }
707 
/* Service level security */
/* Translate the channel's security requirements into an HCI auth type
 * and ask the HCI core to enforce it on the underlying link (see
 * hci_conn_security for return semantics).
 */
int l2cap_chan_check_security(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
}
718 
719 static u8 l2cap_get_ident(struct l2cap_conn *conn)
720 {
721 	u8 id;
722 
723 	/* Get next available identificator.
724 	 *    1 - 128 are used by kernel.
725 	 *  129 - 199 are reserved.
726 	 *  200 - 254 are used by utilities like l2ping, etc.
727 	 */
728 
729 	spin_lock(&conn->lock);
730 
731 	if (++conn->tx_ident > 128)
732 		conn->tx_ident = 1;
733 
734 	id = conn->tx_ident;
735 
736 	spin_unlock(&conn->lock);
737 
738 	return id;
739 }
740 
/* Build and transmit a signalling command on @conn.  Commands go out at
 * maximum HCI priority, marked non-flushable when the controller
 * supports it.  Silently does nothing if the skb cannot be built.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
762 
763 static bool __chan_is_moving(struct l2cap_chan *chan)
764 {
765 	return chan->move_state != L2CAP_MOVE_STABLE &&
766 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
767 }
768 
/* Hand one outbound data frame to HCI.  A channel that lives on a
 * high-speed (AMP) link and is not mid-move sends there — dropping the
 * frame when the logical link is gone — otherwise the frame goes out on
 * the BR/EDR ACL link with flushability derived from channel flags and
 * controller capability.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb);

		return;
	}

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
795 
796 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
797 {
798 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
799 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
800 
801 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
802 		/* S-Frame */
803 		control->sframe = 1;
804 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
805 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
806 
807 		control->sar = 0;
808 		control->txseq = 0;
809 	} else {
810 		/* I-Frame */
811 		control->sframe = 0;
812 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
813 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
814 
815 		control->poll = 0;
816 		control->super = 0;
817 	}
818 }
819 
/* Decode a 32-bit extended control field into @control.  Fields that do
 * not apply to the decoded frame type (S- vs I-frame) are zeroed.
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
843 
844 static inline void __unpack_control(struct l2cap_chan *chan,
845 				    struct sk_buff *skb)
846 {
847 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
848 		__unpack_extended_control(get_unaligned_le32(skb->data),
849 					  &bt_cb(skb)->control);
850 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
851 	} else {
852 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
853 					  &bt_cb(skb)->control);
854 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
855 	}
856 }
857 
858 static u32 __pack_extended_control(struct l2cap_ctrl *control)
859 {
860 	u32 packed;
861 
862 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
863 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
864 
865 	if (control->sframe) {
866 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
867 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
868 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
869 	} else {
870 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
871 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
872 	}
873 
874 	return packed;
875 }
876 
/* Encode @control into a 16-bit enhanced control field; inverse of
 * __unpack_enhanced_control().
 */
static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
{
	u16 packed;

	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
		packed |= L2CAP_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
895 
896 static inline void __pack_control(struct l2cap_chan *chan,
897 				  struct l2cap_ctrl *control,
898 				  struct sk_buff *skb)
899 {
900 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
901 		put_unaligned_le32(__pack_extended_control(control),
902 				   skb->data + L2CAP_HDR_SIZE);
903 	} else {
904 		put_unaligned_le16(__pack_enhanced_control(control),
905 				   skb->data + L2CAP_HDR_SIZE);
906 	}
907 }
908 
909 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
910 {
911 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
912 		return L2CAP_EXT_HDR_SIZE;
913 	else
914 		return L2CAP_ENH_HDR_SIZE;
915 }
916 
/* Build an S-frame PDU carrying the already-packed @control field.  The
 * header size depends on the negotiated control layout; a CRC16 FCS
 * trailer is appended when in use.  Returns ERR_PTR(-ENOMEM) on
 * allocation failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Basic header: payload length excludes the header itself */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	/* FCS covers everything written so far, header included */
	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
949 
/* Transmit the supervisory frame described by @control, updating the
 * ERTM bookkeeping (F-bit, RNR-sent flag, ack state) as a side effect.
 * Ignores non-S-frame controls and does nothing while a channel move is
 * in progress.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	/* A pending F-bit is carried on the next non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	/* Track whether our last supervisory state was RNR */
	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* RR/RNR/REJ acknowledge up to reqseq, so the delayed ack timer
	 * can be cancelled; SREJ does not acknowledge.
	 */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
990 
991 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
992 {
993 	struct l2cap_ctrl control;
994 
995 	BT_DBG("chan %p, poll %d", chan, poll);
996 
997 	memset(&control, 0, sizeof(control));
998 	control.sframe = 1;
999 	control.poll = poll;
1000 
1001 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1002 		control.super = L2CAP_SUPER_RNR;
1003 	else
1004 		control.super = L2CAP_SUPER_RR;
1005 
1006 	control.reqseq = chan->buffer_seq;
1007 	l2cap_send_sframe(chan, &control);
1008 }
1009 
/* True when no Connection Request from this channel is awaiting its
 * response (CONF_CONNECT_PEND is set by l2cap_send_conn_req).
 */
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
1014 
1015 static bool __amp_capable(struct l2cap_chan *chan)
1016 {
1017 	struct l2cap_conn *conn = chan->conn;
1018 
1019 	if (conn->hs_enabled && hci_amp_capable() &&
1020 	    chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED &&
1021 	    conn->fixed_chan_mask & L2CAP_FC_A2MP)
1022 		return true;
1023 
1024 	return false;
1025 }
1026 
/* Validate the channel's extended flow specification.
 * TODO: stub — always accepts; real EFS validation is not implemented.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
1032 
/* Send an L2CAP Connection Request for @chan, remembering the command
 * ident so the response can be routed back (see
 * __l2cap_get_chan_by_ident) and marking the request as pending.
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	/* Cleared when the Connection Response arrives */
	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1047 
/* Send a Create Channel Request — the AMP variant of a Connection
 * Request — asking the peer to create the channel on controller
 * @amp_id.
 */
static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
{
	struct l2cap_create_chan_req req;
	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;
	req.amp_id = amp_id;

	chan->ident = l2cap_get_ident(chan->conn);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
		       sizeof(req), &req);
}
1060 
/* Prepare an ERTM channel for a controller move: stop all timers, reset
 * retransmission bookkeeping, and put the TX/RX state machines into
 * their move states.  No-op for non-ERTM channels.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Reset the retry count of already-transmitted frames to 1; the
	 * walk stops at the first frame with retries == 0, presumably
	 * because everything behind it in tx_q is untransmitted too —
	 * confirm against the ERTM tx path.
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->control.retries)
			bt_cb(skb)->control.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Treat the peer as busy until the move completes */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1095 
/* Finish a channel move: return the move state machine to STABLE and,
 * for ERTM channels, arm the RX state machine to wait for the final
 * poll exchange depending on which side initiated the move.
 */
static void l2cap_move_done(struct l2cap_chan *chan)
{
	u8 move_role = chan->move_role;
	BT_DBG("chan %p", chan);

	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	switch (move_role) {
	case L2CAP_MOVE_ROLE_INITIATOR:
		/* Initiator polls the peer and waits for the F-bit */
		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
		break;
	case L2CAP_MOVE_ROLE_RESPONDER:
		/* Responder waits for the initiator's poll (P-bit) */
		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
		break;
	}
}
1117 
/* Mark @chan fully connected: clear all configuration state, stop the
 * channel timer, and notify the owner through ops->ready.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1128 
/* Begin establishing @chan: discover AMP controllers first when the
 * channel prefers high-speed, otherwise send a plain Connection
 * Request.
 */
static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (!__amp_capable(chan)) {
		l2cap_send_conn_req(chan);
		return;
	}

	BT_DBG("chan %p AMP capable: discover AMPs", chan);
	a2mp_discover_amp(chan);
}
1138 
/* Drive connection setup for @chan.  LE channels are ready as soon as
 * the link is up.  BR/EDR channels need the remote feature mask first:
 * request it if we haven't, otherwise proceed once security allows.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_chan_ready(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature mask exchange still in flight;
		 * l2cap_conn_start() retries when it finishes.
		 */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
		    __l2cap_no_conn_pending(chan)) {
			l2cap_start_connection(chan);
		}
	} else {
		/* First channel on this link: query the feature mask and
		 * arm a timeout in case the remote never answers.
		 */
		struct l2cap_info_req req;
		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(req), &req);
	}
}
1169 
1170 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1171 {
1172 	u32 local_feat_mask = l2cap_feat_mask;
1173 	if (!disable_ertm)
1174 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1175 
1176 	switch (mode) {
1177 	case L2CAP_MODE_ERTM:
1178 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1179 	case L2CAP_MODE_STREAMING:
1180 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1181 	default:
1182 		return 0x00;
1183 	}
1184 }
1185 
/* Send a Disconnect Request for @chan and move it to BT_DISCONN with
 * @err as the error reported to the socket.  A2MP channels have no
 * L2CAP signalling of their own, so only the state changes for them.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* A live ERTM channel still has timers running; stop them */
	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	/* State and pending error change under the socket lock */
	lock_sock(sk);
	__l2cap_state_change(chan, BT_DISCONN);
	__l2cap_chan_set_err(chan, err);
	release_sock(sk);
}
1216 
/* ---- L2CAP connections ---- */

/* Push every connection-oriented channel on @conn through setup: send
 * Connect Requests for channels in BT_CONNECT, and answer pending
 * incoming requests for channels in BT_CONNECT2.  Called once the
 * remote feature mask is known or when security state changes.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* _safe iteration: l2cap_chan_close() below may unlink @chan */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Requested mode unsupported by the remote and no
			 * fallback allowed for this channel: give up.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				lock_sock(sk);
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					/* Userspace must authorize first */
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
				}
				release_sock(sk);
			} else {
				/* Authentication still in progress */
				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only begin configuration after a successful
			 * response, and only send the first Config Req once.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1298 
1299 /* Find socket with cid and source/destination bdaddr.
1300  * Returns closest match, locked.
1301  */
1302 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1303 						    bdaddr_t *src,
1304 						    bdaddr_t *dst)
1305 {
1306 	struct l2cap_chan *c, *c1 = NULL;
1307 
1308 	read_lock(&chan_list_lock);
1309 
1310 	list_for_each_entry(c, &chan_list, global_l) {
1311 		struct sock *sk = c->sk;
1312 
1313 		if (state && c->state != state)
1314 			continue;
1315 
1316 		if (c->scid == cid) {
1317 			int src_match, dst_match;
1318 			int src_any, dst_any;
1319 
1320 			/* Exact match. */
1321 			src_match = !bacmp(&bt_sk(sk)->src, src);
1322 			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1323 			if (src_match && dst_match) {
1324 				read_unlock(&chan_list_lock);
1325 				return c;
1326 			}
1327 
1328 			/* Closest match */
1329 			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1330 			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1331 			if ((src_match && dst_any) || (src_any && dst_match) ||
1332 			    (src_any && dst_any))
1333 				c1 = c;
1334 		}
1335 	}
1336 
1337 	read_unlock(&chan_list_lock);
1338 
1339 	return c1;
1340 }
1341 
/* Handle an implicit LE connection on the ATT fixed channel: if a
 * socket is listening on the ATT CID for this address pair, spawn a
 * child channel and attach it to @conn.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
					  conn->src, conn->dst);
	if (!pchan)
		return;

	/* Client ATT sockets should override the server one */
	if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
		return;

	parent = pchan->sk;

	lock_sock(parent);

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	chan->dcid = L2CAP_CID_ATT;

	/* The child inherits the connection's address pair */
	bacpy(&bt_sk(chan->sk)->src, conn->src);
	bacpy(&bt_sk(chan->sk)->dst, conn->dst);

	__l2cap_chan_add(conn, chan);

clean:
	release_sock(parent);
}
1377 
/* The underlying link is up: raise security where required and advance
 * every channel on @conn along its connection state machine.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out && hcon->type == LE_LINK)
		smp_conn_security(hcon, hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels become ready through their own protocol */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			/* Ready as soon as security is satisfied */
			if (smp_conn_security(hcon, chan->sec_level))
				l2cap_chan_ready(chan);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed/raw channels need no L2CAP handshake */
			struct sock *sk = chan->sk;
			__clear_chan_timer(chan);
			lock_sock(sk);
			__l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);
			release_sock(sk);

		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1426 
/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* Only channels that insisted on a reliable link get @err */
	list_for_each_entry(chan, &conn->chan_l, list) {
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			l2cap_chan_set_err(chan, err);
	}

	mutex_unlock(&conn->chan_lock);
}
1443 
/* The remote never answered our Information Request: mark the feature
 * mask exchange done anyway so pending channels can proceed.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1454 
1455 /*
1456  * l2cap_user
1457  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1458  * callback is called during registration. The ->remove callback is called
1459  * during unregistration.
1460  * An l2cap_user object can either be explicitly unregistered or when the
1461  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1462  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1463  * External modules must own a reference to the l2cap_conn object if they intend
1464  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1465  * any time if they don't.
1466  */
1467 
/* Register @user on @conn and invoke its ->probe callback.  Returns 0
 * on success, -EINVAL if already registered, -ENODEV if the connection
 * is being torn down, or the error returned by ->probe.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* Non-NULL list pointers mean the user is already registered */
	if (user->list.next || user->list.prev) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1505 
/* Remove @user from @conn and invoke its ->remove callback.  Safe to
 * call for a user that was never, or is no longer, registered.
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	/* NULL list pointers mean the user is not registered */
	if (!user->list.next || !user->list.prev)
		goto out_unlock;

	list_del(&user->list);
	/* Reset pointers so a later re-registration is accepted */
	user->list.next = NULL;
	user->list.prev = NULL;
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(l2cap_unregister_user);
1524 
/* Tear down every registered l2cap_user on @conn.  The list head is
 * re-read each iteration because a ->remove callback may unregister
 * other users as a side effect.
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del(&user->list);
		/* Mark as unregistered, matching l2cap_unregister_user() */
		user->list.next = NULL;
		user->list.prev = NULL;
		user->remove(conn, user);
	}
}
1537 
/* Tear down the L2CAP connection attached to @hcon: kill every channel
 * with @err, detach the HCI channel and drop the connection reference.
 * Relies on the hci_dev lock for serialization (see the comment in
 * l2cap_register_user()).
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	l2cap_unregister_all_users(conn);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Extra hold so ->close() can run after l2cap_chan_del() */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		/* ->close() is called without the channel lock held */
		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	/* test_and_clear avoids double cleanup vs. security_timeout() */
	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	/* NULL hchan marks the conn as unregistered for new users */
	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1583 
/* SMP pairing timed out: destroy the SMP context and drop the whole
 * connection.  The test_and_clear_bit() avoids racing the identical
 * cleanup performed by l2cap_conn_del() when the bit is still set.
 */
static void security_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       security_timer.work);

	BT_DBG("conn %p", conn);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
		smp_chan_destroy(conn);
		l2cap_conn_del(conn->hcon, ETIMEDOUT);
	}
}
1596 
/* Return the l2cap_conn for @hcon, creating and attaching a fresh one
 * if none exists yet.  Returns NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	/* Pin the hci_conn for the lifetime of this l2cap_conn;
	 * released in l2cap_conn_free().
	 */
	hci_conn_get(conn->hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	switch (hcon->type) {
	case LE_LINK:
		/* A zero le_mtu means no LE-specific MTU is configured;
		 * fall back to the ACL MTU in that case.
		 */
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	if (hcon->type == ACL_LINK)
		conn->hs_enabled = test_bit(HCI_HS_ENABLED,
					    &hcon->hdev->dev_flags);

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	/* LE links pair via SMP; BR/EDR links exchange the feature mask */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
1659 
/* kref release callback: drop the hci_conn reference taken in
 * l2cap_conn_add() and free the connection object.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1667 
/* Take a reference on @conn */
void l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
}
EXPORT_SYMBOL(l2cap_conn_get);
1673 
/* Drop a reference on @conn; the last put frees it via l2cap_conn_free() */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1679 
1680 /* ---- Socket interface ---- */
1681 
1682 /* Find socket with psm and source / destination bdaddr.
1683  * Returns closest match.
1684  */
1685 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1686 						   bdaddr_t *src,
1687 						   bdaddr_t *dst)
1688 {
1689 	struct l2cap_chan *c, *c1 = NULL;
1690 
1691 	read_lock(&chan_list_lock);
1692 
1693 	list_for_each_entry(c, &chan_list, global_l) {
1694 		struct sock *sk = c->sk;
1695 
1696 		if (state && c->state != state)
1697 			continue;
1698 
1699 		if (c->psm == psm) {
1700 			int src_match, dst_match;
1701 			int src_any, dst_any;
1702 
1703 			/* Exact match. */
1704 			src_match = !bacmp(&bt_sk(sk)->src, src);
1705 			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1706 			if (src_match && dst_match) {
1707 				read_unlock(&chan_list_lock);
1708 				return c;
1709 			}
1710 
1711 			/* Closest match */
1712 			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1713 			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1714 			if ((src_match && dst_any) || (src_any && dst_match) ||
1715 			    (src_any && dst_any))
1716 				c1 = c;
1717 		}
1718 	}
1719 
1720 	read_unlock(&chan_list_lock);
1721 
1722 	return c1;
1723 }
1724 
/* Initiate an outgoing L2CAP connection on @chan to @dst/@dst_type,
 * addressed by @psm (connection-oriented) or a fixed channel @cid.
 * Creates the underlying ACL or LE link as needed.  Returns 0 on
 * success (including when a connect is already in progress) or a
 * negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need either a PSM or a fixed CID */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	lock_sock(sk);
	bacpy(&bt_sk(sk)->dst, dst);
	release_sock(sk);

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	if (bdaddr_type_is_le(dst_type))
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* A fixed CID may only be used by one channel per connection */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* NOTE(review): the channel lock is dropped around
	 * l2cap_chan_add(), presumably for lock ordering against
	 * conn->chan_lock — confirm against l2cap_chan_add().
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed/raw channel: no handshake required */
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1860 
/* Wait (interruptibly) until all outstanding ERTM I-frames have been
 * acknowledged by the remote or the channel loses its connection.
 * Called with the socket locked; the lock is dropped and re-taken
 * around each sleep.  Returns 0 on success or a negative errno from a
 * pending signal or socket error.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		/* Re-arm the poll interval after a full expiry */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1892 
/* ERTM monitor timer expired: feed L2CAP_EV_MONITOR_TO into the TX
 * state machine, unless the channel was already disconnected.  Always
 * drops the reference held by the timer.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel torn down while the work was queued */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1913 
/* ERTM retransmission timer expired: feed L2CAP_EV_RETRANS_TO into the
 * TX state machine, unless the channel was already disconnected.
 * Always drops the reference held by the timer.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel torn down while the work was queued */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1933 
/* Append @skbs to the TX queue and transmit everything immediately as
 * streaming-mode I-frames.  Streaming mode has no retransmission, so
 * frames are dequeued permanently as they are sent.  Transmission is
 * deferred while an AMP channel move is in progress.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Streaming frames carry no acknowledgment info */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1972 
/* Transmit pending I-frames in ERTM mode, bounded by the remote's TX
 * window.  Returns the number of frames sent, 0 when transmission is
 * blocked (remote busy, AMP move, window full) or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Piggy-back an ack for everything received so far */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head; the original skb stays on tx_q
		 * for possible retransmission.
		 */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2042 
/* Retransmit every sequence number queued on chan->retrans_list,
 * enforcing the max_tx retry limit (0 means unlimited) and tearing the
 * channel down when it is exceeded.  Skipped while the remote is busy
 * or an AMP move is in progress.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		/* Work on a copy so only this retransmission carries the
		 * updated reqseq/final bits; the queued header is untouched.
		 */
		control = bt_cb(skb)->control;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2121 
/* Retransmit the single frame identified by @control->reqseq */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2130 
/* Retransmit all unacked frames beginning at @control->reqseq, up to
 * (but not including) tx_send_head.  A poll from the remote obliges us
 * to set the F-bit on the next outgoing frame.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Start from a clean retransmission queue */
	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame to retransmit */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue every already-sent frame from there onwards */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2164 
/* Acknowledge received I-frames.  Sends RNR while locally busy;
 * otherwise tries to piggy-back the ack on pending data, sends an
 * explicit RR once roughly 3/4 of the ack window is outstanding, or
 * arms the ack timer to batch the acknowledgment.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Below threshold: defer the ack via the ack timer */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2214 
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into @skb directly, the remainder into MTU-sized fragments
 * chained on its frag_list.  Returns bytes consumed or a negative
 * errno; on error the caller frees @skb, which also releases any
 * fragments already linked to it.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len  -= count;

		/* Keep the head skb's accounting in sync with the chain */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2259 
/* Build a connectionless PDU: L2CAP header plus the 2-byte PSM, with
 * the payload fragmented to the connection MTU by
 * l2cap_skbuff_fromiovec().  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
						 u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu priority %u", chan, len, priority);

	/* First fragment carries the header; cap it to the MTU */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2293 
/* Build a single basic mode (B-frame) PDU: a basic L2CAP header
 * followed by the payload copied from the caller's iovec.  Payload
 * beyond the first HCI fragment is attached as skb fragments by
 * l2cap_skbuff_fromiovec().
 *
 * Returns the skb on success or an ERR_PTR() on allocation/copy
 * failure.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len,
					      u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	/* Linear part of the skb is bounded by the HCI MTU */
	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2326 
/* Build a single ERTM/streaming mode I-frame PDU.
 *
 * The header is the basic L2CAP header, then an enhanced (16-bit) or
 * extended (32-bit) control field, then an optional SDU length field
 * which is present only in the first PDU of a segmented SDU (@sdulen
 * non-zero).  When FCS is enabled, room for the trailing checksum is
 * reserved in the allocation and counted in the length field.  The
 * control field is zeroed here and populated at transmit time.
 *
 * Returns the skb on success or an ERR_PTR() on failure.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Control field size depends on enhanced vs extended mode */
	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* Length counts everything after the basic header, including
	 * any FCS appended later.
	 */
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2380 
/* Segment an outgoing SDU into one or more I-frame PDUs and queue
 * them on @seg_queue.
 *
 * The PDU payload size is bounded by the HCI MTU, the remote MPS and
 * (for BR/EDR links) L2CAP_BREDR_MAX_PAYLOAD, minus worst-case ERTM
 * header and FCS overhead, so each PDU fits in one HCI fragment.
 *
 * Returns 0 on success or a negative error; on error any PDUs
 * already created have been purged from @seg_queue.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU: no SDU length field */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First PDU of a segmented SDU carries the total SDU
		 * length, which costs L2CAP_SDULEN_SIZE of payload.
		 */
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Continuation PDUs have no SDU length field,
			 * so the payload budget grows back.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2450 
/* Send a user SDU on @chan.
 *
 * Connectionless channels transmit a single G-frame immediately.
 * Basic mode sends one unsegmented B-frame.  ERTM and streaming mode
 * segment the SDU first and then either hand the PDUs to the
 * transmit state machine (ERTM) or send them directly (streaming).
 *
 * Returns the number of bytes sent, or a negative error (-EMSGSIZE
 * if the SDU exceeds the outgoing MTU, -ENOTCONN if the channel
 * dropped out of BT_CONNECTED during segmentation, -EBADFD for an
 * unknown mode).
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			/* NOTE: a disconnect supersedes any error that
			 * l2cap_segment_sdu() may have returned.
			 */
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2530 
2531 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2532 {
2533 	struct l2cap_ctrl control;
2534 	u16 seq;
2535 
2536 	BT_DBG("chan %p, txseq %u", chan, txseq);
2537 
2538 	memset(&control, 0, sizeof(control));
2539 	control.sframe = 1;
2540 	control.super = L2CAP_SUPER_SREJ;
2541 
2542 	for (seq = chan->expected_tx_seq; seq != txseq;
2543 	     seq = __next_seq(chan, seq)) {
2544 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2545 			control.reqseq = seq;
2546 			l2cap_send_sframe(chan, &control);
2547 			l2cap_seq_list_append(&chan->srej_list, seq);
2548 		}
2549 	}
2550 
2551 	chan->expected_tx_seq = __next_seq(chan, txseq);
2552 }
2553 
2554 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2555 {
2556 	struct l2cap_ctrl control;
2557 
2558 	BT_DBG("chan %p", chan);
2559 
2560 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2561 		return;
2562 
2563 	memset(&control, 0, sizeof(control));
2564 	control.sframe = 1;
2565 	control.super = L2CAP_SUPER_SREJ;
2566 	control.reqseq = chan->srej_list.tail;
2567 	l2cap_send_sframe(chan, &control);
2568 }
2569 
/* Re-send SREJ frames for every sequence number still on the SREJ
 * list, stopping early if @txseq is reached.  Each popped sequence
 * number other than @txseq is appended back to the list, so the list
 * is effectively rotated; the initial head is captured so the loop
 * makes exactly one pass.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2595 
/* Process an acknowledgement (ReqSeq) from the peer: free every
 * transmitted frame up to, but not including, @reqseq, and stop the
 * retransmission timer once no frames remain unacknowledged.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing to do if nothing is in flight or this ack was
	 * already processed.
	 */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	/* All in-flight frames acknowledged: no retransmit needed */
	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2627 
2628 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2629 {
2630 	BT_DBG("chan %p", chan);
2631 
2632 	chan->expected_tx_seq = chan->buffer_seq;
2633 	l2cap_seq_list_clear(&chan->srej_list);
2634 	skb_queue_purge(&chan->srej_q);
2635 	chan->rx_state = L2CAP_RX_STATE_RECV;
2636 }
2637 
/* Transmit state machine handler for the XMIT state.
 *
 * New data is queued and sent immediately.  Local-busy transitions
 * pause the receiver and acknowledge the peer.  Explicit polls and
 * retransmission timeouts send an RR/RNR with the P-bit and move the
 * machine to WAIT_F, where a frame with the F-bit set is awaited.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* An RNR was sent while busy; poll the peer with
			 * an RR so it resumes, and wait for the F-bit.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2709 
/* Transmit state machine handler for the WAIT_F state (a poll with
 * the P-bit has been sent and a response with the F-bit is awaited).
 *
 * New data is queued but not transmitted.  Receipt of the F-bit
 * returns the machine to XMIT; monitor timeouts re-poll until
 * max_tx is exhausted, at which point the channel is disconnected.
 */
static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;
			/* Re-poll the peer with an RR after clearing the
			 * busy condition signalled by a previous RNR.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);

		/* Fall through */

	case L2CAP_EV_RECV_FBIT:
		if (control && control->final) {
			/* The poll was answered: resume transmitting */
			__clear_monitor_timer(chan);
			if (chan->unacked_frames > 0)
				__set_retrans_timer(chan);
			chan->retry_count = 0;
			chan->tx_state = L2CAP_TX_STATE_XMIT;
			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
		}
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Ignore */
		break;
	case L2CAP_EV_MONITOR_TO:
		/* max_tx == 0 means retry forever */
		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
			l2cap_send_rr_or_rnr(chan, 1);
			__set_monitor_timer(chan);
			chan->retry_count++;
		} else {
			l2cap_send_disconn_req(chan, ECONNABORTED);
		}
		break;
	default:
		break;
	}
}
2787 
2788 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2789 		     struct sk_buff_head *skbs, u8 event)
2790 {
2791 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2792 	       chan, control, skbs, event, chan->tx_state);
2793 
2794 	switch (chan->tx_state) {
2795 	case L2CAP_TX_STATE_XMIT:
2796 		l2cap_tx_state_xmit(chan, control, skbs, event);
2797 		break;
2798 	case L2CAP_TX_STATE_WAIT_F:
2799 		l2cap_tx_state_wait_f(chan, control, skbs, event);
2800 		break;
2801 	default:
2802 		/* Ignore event */
2803 		break;
2804 	}
2805 }
2806 
2807 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2808 			     struct l2cap_ctrl *control)
2809 {
2810 	BT_DBG("chan %p, control %p", chan, control);
2811 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2812 }
2813 
2814 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2815 				  struct l2cap_ctrl *control)
2816 {
2817 	BT_DBG("chan %p, control %p", chan, control);
2818 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2819 }
2820 
2821 /* Copy frame to all raw sockets on that connection */
2822 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2823 {
2824 	struct sk_buff *nskb;
2825 	struct l2cap_chan *chan;
2826 
2827 	BT_DBG("conn %p", conn);
2828 
2829 	mutex_lock(&conn->chan_lock);
2830 
2831 	list_for_each_entry(chan, &conn->chan_l, list) {
2832 		struct sock *sk = chan->sk;
2833 		if (chan->chan_type != L2CAP_CHAN_RAW)
2834 			continue;
2835 
2836 		/* Don't send frame to the socket it came from */
2837 		if (skb->sk == sk)
2838 			continue;
2839 		nskb = skb_clone(skb, GFP_KERNEL);
2840 		if (!nskb)
2841 			continue;
2842 
2843 		if (chan->ops->recv(chan, nskb))
2844 			kfree_skb(nskb);
2845 	}
2846 
2847 	mutex_unlock(&conn->chan_lock);
2848 }
2849 
2850 /* ---- L2CAP signalling commands ---- */
/* Allocate and build an L2CAP signalling command skb: a basic L2CAP
 * header on the signalling CID (LE or BR/EDR, depending on the link
 * type), the command header, and @dlen bytes of @data.  Data beyond
 * the first HCI fragment is attached as raw continuation fragments
 * with no additional headers.
 *
 * Returns NULL if allocation fails or the connection MTU cannot hold
 * even the headers.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling CID differs between LE and BR/EDR links */
	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the rest of the first fragment with data */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Freeing the head skb also frees any fragments built so far */
	kfree_skb(skb);
	return NULL;
}
2916 
/* Parse one option from a configuration request/response buffer and
 * advance *ptr past it.  1-, 2- and 4-octet values are read
 * (little-endian) into *val; for any other length *val is set to a
 * pointer to the option payload instead.
 *
 * Returns the total number of bytes consumed (header + value).
 *
 * NOTE(review): opt->len comes from the wire and is not checked here
 * against the remaining buffer length, so a malformed option can make
 * the value read or the *ptr advance reach past the buffer -- verify
 * that every caller bounds the walk correctly.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
2950 
/* Append one configuration option at *ptr and advance *ptr past it.
 * 1-, 2- and 4-octet values are written little-endian; for any other
 * length @val is interpreted as a pointer to @len bytes to copy.
 *
 * NOTE(review): no output-buffer length is passed in, so the write is
 * unbounded here; callers must size the destination for the worst
 * case -- verify against the callers.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length option: @val is a source pointer */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
2980 
/* Append an Extended Flow Specification option describing the local
 * side of the channel.  ERTM uses the locally configured identifiers
 * plus default access latency and flush timeout; streaming mode uses
 * a fixed best-effort service with zero latency/flush values.  Other
 * modes carry no EFS option, so nothing is written.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id		= chan->local_id;
		efs.stype	= chan->local_stype;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to	= __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id		= 1;
		efs.stype	= L2CAP_SERV_BESTEFFORT;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= 0;
		efs.flush_to	= 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs);
}
3011 
/* Delayed work run when the acknowledgement timer expires: if any
 * received I-frames have not yet been acknowledged, send an RR (or
 * RNR while local-busy) frame to the peer.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Distance between what we've received and what we've acked */
	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	/* Balances the channel reference held while this work was
	 * pending -- presumably taken when the timer was armed.
	 */
	l2cap_chan_put(chan);
}
3031 
/* Initialise ERTM/streaming state for a newly configured channel.
 * Sequence numbers, SDU reassembly state, the transmit queue and AMP
 * move state are reset for all modes; the timers, SREJ queue and
 * sequence lists are only set up when the mode is ERTM.
 *
 * Returns 0 on success or a negative error from sequence-list
 * allocation, in which case the partially allocated list is freed.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	chan->local_amp_id = 0;
	chan->move_id = 0;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Don't leak the first list on partial failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3076 
3077 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3078 {
3079 	switch (mode) {
3080 	case L2CAP_MODE_STREAMING:
3081 	case L2CAP_MODE_ERTM:
3082 		if (l2cap_mode_supported(mode, remote_feat_mask))
3083 			return mode;
3084 		/* fall through */
3085 	default:
3086 		return L2CAP_MODE_BASIC;
3087 	}
3088 }
3089 
3090 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3091 {
3092 	return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3093 }
3094 
3095 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3096 {
3097 	return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3098 }
3099 
3100 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3101 				      struct l2cap_conf_rfc *rfc)
3102 {
3103 	if (chan->local_amp_id && chan->hs_hcon) {
3104 		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3105 
3106 		/* Class 1 devices have must have ERTM timeouts
3107 		 * exceeding the Link Supervision Timeout.  The
3108 		 * default Link Supervision Timeout for AMP
3109 		 * controllers is 10 seconds.
3110 		 *
3111 		 * Class 1 devices use 0xffffffff for their
3112 		 * best-effort flush timeout, so the clamping logic
3113 		 * will result in a timeout that meets the above
3114 		 * requirement.  ERTM timeouts are 16-bit values, so
3115 		 * the maximum timeout is 65.535 seconds.
3116 		 */
3117 
3118 		/* Convert timeout to milliseconds and round */
3119 		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3120 
3121 		/* This is the recommended formula for class 2 devices
3122 		 * that start ERTM timers when packets are sent to the
3123 		 * controller.
3124 		 */
3125 		ertm_to = 3 * ertm_to + 500;
3126 
3127 		if (ertm_to > 0xffff)
3128 			ertm_to = 0xffff;
3129 
3130 		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3131 		rfc->monitor_timeout = rfc->retrans_timeout;
3132 	} else {
3133 		rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3134 		rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3135 	}
3136 }
3137 
3138 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3139 {
3140 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3141 	    __l2cap_ews_supported(chan->conn)) {
3142 		/* use extended control field */
3143 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3144 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3145 	} else {
3146 		chan->tx_win = min_t(u16, chan->tx_win,
3147 				     L2CAP_DEFAULT_TX_WINDOW);
3148 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3149 	}
3150 	chan->ack_win = chan->tx_win;
3151 }
3152 
/* Build a Configuration Request for @chan into @data and return the
 * number of bytes written.
 *
 * On the first request the channel mode may be downgraded according
 * to the remote feature mask.  MTU, RFC (mode-specific parameters),
 * optional EFS, extended window size and FCS options are appended as
 * applicable for the selected mode.
 *
 * NOTE(review): no output-buffer length is passed in, so the option
 * writes are unbounded here; callers must size @data for the worst
 * case -- verify against the callers.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection only happens on the first exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only advertise a non-default MTU */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* An explicit basic-mode RFC option is only needed if
		 * the remote supports other modes at all.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* MPS must leave room for the largest possible header
		 * and FCS within the HCI MTU.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		/* Extended window is carried in its own option */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3270 
3271 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3272 {
3273 	struct l2cap_conf_rsp *rsp = data;
3274 	void *ptr = rsp->data;
3275 	void *req = chan->conf_req;
3276 	int len = chan->conf_len;
3277 	int type, hint, olen;
3278 	unsigned long val;
3279 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3280 	struct l2cap_conf_efs efs;
3281 	u8 remote_efs = 0;
3282 	u16 mtu = L2CAP_DEFAULT_MTU;
3283 	u16 result = L2CAP_CONF_SUCCESS;
3284 	u16 size;
3285 
3286 	BT_DBG("chan %p", chan);
3287 
3288 	while (len >= L2CAP_CONF_OPT_SIZE) {
3289 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3290 
3291 		hint  = type & L2CAP_CONF_HINT;
3292 		type &= L2CAP_CONF_MASK;
3293 
3294 		switch (type) {
3295 		case L2CAP_CONF_MTU:
3296 			mtu = val;
3297 			break;
3298 
3299 		case L2CAP_CONF_FLUSH_TO:
3300 			chan->flush_to = val;
3301 			break;
3302 
3303 		case L2CAP_CONF_QOS:
3304 			break;
3305 
3306 		case L2CAP_CONF_RFC:
3307 			if (olen == sizeof(rfc))
3308 				memcpy(&rfc, (void *) val, olen);
3309 			break;
3310 
3311 		case L2CAP_CONF_FCS:
3312 			if (val == L2CAP_FCS_NONE)
3313 				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3314 			break;
3315 
3316 		case L2CAP_CONF_EFS:
3317 			remote_efs = 1;
3318 			if (olen == sizeof(efs))
3319 				memcpy(&efs, (void *) val, olen);
3320 			break;
3321 
3322 		case L2CAP_CONF_EWS:
3323 			if (!chan->conn->hs_enabled)
3324 				return -ECONNREFUSED;
3325 
3326 			set_bit(FLAG_EXT_CTRL, &chan->flags);
3327 			set_bit(CONF_EWS_RECV, &chan->conf_state);
3328 			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3329 			chan->remote_tx_win = val;
3330 			break;
3331 
3332 		default:
3333 			if (hint)
3334 				break;
3335 
3336 			result = L2CAP_CONF_UNKNOWN;
3337 			*((u8 *) ptr++) = type;
3338 			break;
3339 		}
3340 	}
3341 
3342 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
3343 		goto done;
3344 
3345 	switch (chan->mode) {
3346 	case L2CAP_MODE_STREAMING:
3347 	case L2CAP_MODE_ERTM:
3348 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3349 			chan->mode = l2cap_select_mode(rfc.mode,
3350 						       chan->conn->feat_mask);
3351 			break;
3352 		}
3353 
3354 		if (remote_efs) {
3355 			if (__l2cap_efs_supported(chan->conn))
3356 				set_bit(FLAG_EFS_ENABLE, &chan->flags);
3357 			else
3358 				return -ECONNREFUSED;
3359 		}
3360 
3361 		if (chan->mode != rfc.mode)
3362 			return -ECONNREFUSED;
3363 
3364 		break;
3365 	}
3366 
3367 done:
3368 	if (chan->mode != rfc.mode) {
3369 		result = L2CAP_CONF_UNACCEPT;
3370 		rfc.mode = chan->mode;
3371 
3372 		if (chan->num_conf_rsp == 1)
3373 			return -ECONNREFUSED;
3374 
3375 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3376 				   (unsigned long) &rfc);
3377 	}
3378 
3379 	if (result == L2CAP_CONF_SUCCESS) {
3380 		/* Configure output options and let the other side know
3381 		 * which ones we don't like. */
3382 
3383 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
3384 			result = L2CAP_CONF_UNACCEPT;
3385 		else {
3386 			chan->omtu = mtu;
3387 			set_bit(CONF_MTU_DONE, &chan->conf_state);
3388 		}
3389 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3390 
3391 		if (remote_efs) {
3392 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3393 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3394 			    efs.stype != chan->local_stype) {
3395 
3396 				result = L2CAP_CONF_UNACCEPT;
3397 
3398 				if (chan->num_conf_req >= 1)
3399 					return -ECONNREFUSED;
3400 
3401 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3402 						   sizeof(efs),
3403 						   (unsigned long) &efs);
3404 			} else {
3405 				/* Send PENDING Conf Rsp */
3406 				result = L2CAP_CONF_PENDING;
3407 				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3408 			}
3409 		}
3410 
3411 		switch (rfc.mode) {
3412 		case L2CAP_MODE_BASIC:
3413 			chan->fcs = L2CAP_FCS_NONE;
3414 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3415 			break;
3416 
3417 		case L2CAP_MODE_ERTM:
3418 			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3419 				chan->remote_tx_win = rfc.txwin_size;
3420 			else
3421 				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3422 
3423 			chan->remote_max_tx = rfc.max_transmit;
3424 
3425 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3426 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3427 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3428 			rfc.max_pdu_size = cpu_to_le16(size);
3429 			chan->remote_mps = size;
3430 
3431 			__l2cap_set_ertm_timeouts(chan, &rfc);
3432 
3433 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3434 
3435 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3436 					   sizeof(rfc), (unsigned long) &rfc);
3437 
3438 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3439 				chan->remote_id = efs.id;
3440 				chan->remote_stype = efs.stype;
3441 				chan->remote_msdu = le16_to_cpu(efs.msdu);
3442 				chan->remote_flush_to =
3443 					le32_to_cpu(efs.flush_to);
3444 				chan->remote_acc_lat =
3445 					le32_to_cpu(efs.acc_lat);
3446 				chan->remote_sdu_itime =
3447 					le32_to_cpu(efs.sdu_itime);
3448 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3449 						   sizeof(efs),
3450 						   (unsigned long) &efs);
3451 			}
3452 			break;
3453 
3454 		case L2CAP_MODE_STREAMING:
3455 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3456 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3457 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3458 			rfc.max_pdu_size = cpu_to_le16(size);
3459 			chan->remote_mps = size;
3460 
3461 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3462 
3463 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3464 					   (unsigned long) &rfc);
3465 
3466 			break;
3467 
3468 		default:
3469 			result = L2CAP_CONF_UNACCEPT;
3470 
3471 			memset(&rfc, 0, sizeof(rfc));
3472 			rfc.mode = chan->mode;
3473 		}
3474 
3475 		if (result == L2CAP_CONF_SUCCESS)
3476 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3477 	}
3478 	rsp->scid   = cpu_to_le16(chan->dcid);
3479 	rsp->result = cpu_to_le16(result);
3480 	rsp->flags  = __constant_cpu_to_le16(0);
3481 
3482 	return ptr - data;
3483 }
3484 
3485 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3486 				void *data, u16 *result)
3487 {
3488 	struct l2cap_conf_req *req = data;
3489 	void *ptr = req->data;
3490 	int type, olen;
3491 	unsigned long val;
3492 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3493 	struct l2cap_conf_efs efs;
3494 
3495 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3496 
3497 	while (len >= L2CAP_CONF_OPT_SIZE) {
3498 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3499 
3500 		switch (type) {
3501 		case L2CAP_CONF_MTU:
3502 			if (val < L2CAP_DEFAULT_MIN_MTU) {
3503 				*result = L2CAP_CONF_UNACCEPT;
3504 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3505 			} else
3506 				chan->imtu = val;
3507 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3508 			break;
3509 
3510 		case L2CAP_CONF_FLUSH_TO:
3511 			chan->flush_to = val;
3512 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3513 					   2, chan->flush_to);
3514 			break;
3515 
3516 		case L2CAP_CONF_RFC:
3517 			if (olen == sizeof(rfc))
3518 				memcpy(&rfc, (void *)val, olen);
3519 
3520 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3521 			    rfc.mode != chan->mode)
3522 				return -ECONNREFUSED;
3523 
3524 			chan->fcs = 0;
3525 
3526 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3527 					   sizeof(rfc), (unsigned long) &rfc);
3528 			break;
3529 
3530 		case L2CAP_CONF_EWS:
3531 			chan->ack_win = min_t(u16, val, chan->ack_win);
3532 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3533 					   chan->tx_win);
3534 			break;
3535 
3536 		case L2CAP_CONF_EFS:
3537 			if (olen == sizeof(efs))
3538 				memcpy(&efs, (void *)val, olen);
3539 
3540 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3541 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3542 			    efs.stype != chan->local_stype)
3543 				return -ECONNREFUSED;
3544 
3545 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3546 					   (unsigned long) &efs);
3547 			break;
3548 
3549 		case L2CAP_CONF_FCS:
3550 			if (*result == L2CAP_CONF_PENDING)
3551 				if (val == L2CAP_FCS_NONE)
3552 					set_bit(CONF_RECV_NO_FCS,
3553 						&chan->conf_state);
3554 			break;
3555 		}
3556 	}
3557 
3558 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3559 		return -ECONNREFUSED;
3560 
3561 	chan->mode = rfc.mode;
3562 
3563 	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3564 		switch (rfc.mode) {
3565 		case L2CAP_MODE_ERTM:
3566 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3567 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3568 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3569 			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3570 				chan->ack_win = min_t(u16, chan->ack_win,
3571 						      rfc.txwin_size);
3572 
3573 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3574 				chan->local_msdu = le16_to_cpu(efs.msdu);
3575 				chan->local_sdu_itime =
3576 					le32_to_cpu(efs.sdu_itime);
3577 				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3578 				chan->local_flush_to =
3579 					le32_to_cpu(efs.flush_to);
3580 			}
3581 			break;
3582 
3583 		case L2CAP_MODE_STREAMING:
3584 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3585 		}
3586 	}
3587 
3588 	req->dcid   = cpu_to_le16(chan->dcid);
3589 	req->flags  = __constant_cpu_to_le16(0);
3590 
3591 	return ptr - data;
3592 }
3593 
3594 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3595 				u16 result, u16 flags)
3596 {
3597 	struct l2cap_conf_rsp *rsp = data;
3598 	void *ptr = rsp->data;
3599 
3600 	BT_DBG("chan %p", chan);
3601 
3602 	rsp->scid   = cpu_to_le16(chan->dcid);
3603 	rsp->result = cpu_to_le16(result);
3604 	rsp->flags  = cpu_to_le16(flags);
3605 
3606 	return ptr - data;
3607 }
3608 
/* Send the success Connect (or Create Channel) Response that was
 * deferred while security/authorization was pending, then start
 * configuration if no Configure Request has been sent yet.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];
	u8 rsp_code;

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	/* Channels backed by an AMP link were created with Create
	 * Channel, so they must be answered with the matching response.
	 */
	if (chan->hs_hcon)
		rsp_code = L2CAP_CREATE_CHAN_RSP;
	else
		rsp_code = L2CAP_CONN_RSP;

	BT_DBG("chan %p rsp_code %u", chan, rsp_code);

	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);

	/* Only send the initial Configure Request once per channel. */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
3637 
/* Extract the final ERTM/streaming parameters from a successful
 * Configure Response.  Options the remote omitted (or sent with a bad
 * length) fall back to the sane defaults set up below.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	/* Basic mode carries none of these parameters. */
	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* With extended control the ack window comes from the EWS
		 * option; otherwise it is capped by the RFC tx window.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
	}
}
3688 
3689 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3690 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3691 				    u8 *data)
3692 {
3693 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3694 
3695 	if (cmd_len < sizeof(*rej))
3696 		return -EPROTO;
3697 
3698 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3699 		return 0;
3700 
3701 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3702 	    cmd->ident == conn->info_ident) {
3703 		cancel_delayed_work(&conn->info_timer);
3704 
3705 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3706 		conn->info_ident = 0;
3707 
3708 		l2cap_conn_start(conn);
3709 	}
3710 
3711 	return 0;
3712 }
3713 
/* Handle an incoming Connection Request (or the BR/EDR side of a
 * Create Channel Request when @amp_id is non-zero).  Looks up a
 * listening channel for the requested PSM, performs security checks,
 * allocates the new channel, and sends the response with @rsp_code.
 *
 * Returns the newly created channel, or NULL if the request was
 * rejected (a response is sent in every case).
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	/* Lock order: connection channel list first, then parent socket */
	mutex_lock(&conn->chan_lock);
	lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	sk = chan->sk;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	/* __l2cap_chan_add() assigned our source CID; report it back */
	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
				__l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id) {
					__l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security upgrade still in progress */
			__l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange not done yet; answer pending */
		__l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	release_sock(parent);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Kick off the feature mask exchange if it has not started yet */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* On immediate success, also send the first Configure Request */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
3849 
/* Signaling handler for a BR/EDR Connection Request: notify the
 * management interface of the connection (once) and delegate the
 * actual channel setup to l2cap_connect().
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	struct hci_conn *hcon = conn->hcon;

	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	hci_dev_lock(hdev);
	/* HCI_CONN_MGMT_CONNECTED guards against duplicate notifications */
	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
		mgmt_device_connected(hdev, &hcon->dst, hcon->type,
				      hcon->dst_type, 0, NULL, 0,
				      hcon->dev_class);
	hci_dev_unlock(hdev);

	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
	return 0;
}
3870 
/* Handle a Connection Response or Create Channel Response.  On
 * success, move the channel to BT_CONFIG and send the first Configure
 * Request; on a final error, delete the channel.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	/* A pending response carries no source CID yet, so fall back to
	 * matching by the command identifier in that case.
	 */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the initial Configure Request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result is a final rejection */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3943 
3944 static inline void set_default_fcs(struct l2cap_chan *chan)
3945 {
3946 	/* FCS is enabled only in ERTM or streaming mode, if one or both
3947 	 * sides request it.
3948 	 */
3949 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3950 		chan->fcs = L2CAP_FCS_NONE;
3951 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3952 		chan->fcs = L2CAP_FCS_CRC16;
3953 }
3954 
3955 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3956 				    u8 ident, u16 flags)
3957 {
3958 	struct l2cap_conn *conn = chan->conn;
3959 
3960 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3961 	       flags);
3962 
3963 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3964 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3965 
3966 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3967 		       l2cap_build_conf_rsp(chan, data,
3968 					    L2CAP_CONF_SUCCESS, flags), data);
3969 }
3970 
/* Handle an incoming Configure Request: accumulate (possibly
 * fragmented) option data in chan->conf_req, and once complete, parse
 * it and send back a Configure Response.  When both directions have
 * finished configuring, bring the channel up.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns with the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -EBADSLT;

	/* Configuration is only valid while connecting or configuring */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		struct l2cap_cmd_rej_cid rej;

		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finalize FCS, init ERTM, go ready */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and asume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4083 
/* Handle an incoming Configure Response and advance the configuration
 * state machine accordingly (success, pending, unacceptable, or a
 * fatal result that tears the channel down).
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns with the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			/* NOTE(review): 'buf' is only 64 bytes; confirm that
			 * l2cap_parse_conf_rsp cannot emit more option data
			 * than fits here for crafted input.
			 */
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				/* AMP channel: wait for the logical link
				 * before completing configuration.
				 */
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* Too many rejections: fall through and give up */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions done: finalize FCS, init ERTM, bring chan up */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4195 
/* Handle an incoming Disconnection Request: acknowledge it, shut down
 * the socket, and tear the channel down.  The extra hold/put pair
 * keeps the channel alive across ops->close() after it has been
 * removed from the connection list.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's dcid is our scid */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return -EBADSLT;
	}

	l2cap_chan_lock(chan);

	sk = chan->sk;

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	release_sock(sk);

	/* Hold a reference so ops->close() below sees a live channel */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4246 
/* Handle an incoming Disconnection Response: the peer has confirmed
 * our disconnect, so finish tearing down the channel.  An unknown
 * scid is ignored (the channel may already be gone).
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Hold a reference so ops->close() below sees a live channel */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4285 
/* Handle an incoming Information Request by answering with our
 * feature mask, the fixed channel map, or "not supported".
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (conn->hs_enabled)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		/* NOTE(review): this mutates the file-global
		 * l2cap_fixed_chan[] based on per-connection state;
		 * concurrent connections with differing hs_enabled could
		 * race here — verify whether this should be per-conn.
		 */
		if (conn->hs_enabled)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type   = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
4340 
/* Handle an incoming Information Response.  After the feature mask
 * arrives, optionally query the fixed channel map; once the exchange
 * is done, start any channels that were waiting on it.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused; treat the exchange as finished anyway */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		/* Follow up with a fixed-channel query if supported */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4403 
/* Handle an incoming Create Channel Request (high-speed/AMP variant of
 * a Connection Request).  Controller id 0 is plain BR/EDR; any other
 * id must name a powered-up AMP controller, otherwise the request is
 * rejected with L2CAP_CR_BAD_AMP.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!conn->hs_enabled)
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == HCI_BREDR_ID) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		/* The AMP physical link must already exist */
		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, conn->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			return -EBADSLT;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		/* AMP links do their own error detection; no L2CAP FCS */
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4477 
4478 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4479 {
4480 	struct l2cap_move_chan_req req;
4481 	u8 ident;
4482 
4483 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4484 
4485 	ident = l2cap_get_ident(chan->conn);
4486 	chan->ident = ident;
4487 
4488 	req.icid = cpu_to_le16(chan->scid);
4489 	req.dest_amp_id = dest_amp_id;
4490 
4491 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4492 		       &req);
4493 
4494 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4495 }
4496 
4497 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4498 {
4499 	struct l2cap_move_chan_rsp rsp;
4500 
4501 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4502 
4503 	rsp.icid = cpu_to_le16(chan->dcid);
4504 	rsp.result = cpu_to_le16(result);
4505 
4506 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4507 		       sizeof(rsp), &rsp);
4508 }
4509 
4510 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4511 {
4512 	struct l2cap_move_chan_cfm cfm;
4513 
4514 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4515 
4516 	chan->ident = l2cap_get_ident(chan->conn);
4517 
4518 	cfm.icid = cpu_to_le16(chan->scid);
4519 	cfm.result = cpu_to_le16(result);
4520 
4521 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4522 		       sizeof(cfm), &cfm);
4523 
4524 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4525 }
4526 
4527 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4528 {
4529 	struct l2cap_move_chan_cfm cfm;
4530 
4531 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4532 
4533 	cfm.icid = cpu_to_le16(icid);
4534 	cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4535 
4536 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4537 		       sizeof(cfm), &cfm);
4538 }
4539 
4540 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4541 					 u16 icid)
4542 {
4543 	struct l2cap_move_chan_cfm_rsp rsp;
4544 
4545 	BT_DBG("icid 0x%4.4x", icid);
4546 
4547 	rsp.icid = cpu_to_le16(icid);
4548 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4549 }
4550 
4551 static void __release_logical_link(struct l2cap_chan *chan)
4552 {
4553 	chan->hs_hchan = NULL;
4554 	chan->hs_hcon = NULL;
4555 
4556 	/* Placeholder - release the logical link */
4557 }
4558 
/* Handle a failed logical link (AMP) setup for @chan.
 *
 * Before BT_CONNECTED the failure happened while creating the channel
 * on an AMP controller, so the channel is disconnected.  Afterwards it
 * happened during a channel move, which is aborted according to our
 * role in the move.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		/* Tear down move state, then refuse the pending move */
		l2cap_move_done(chan);
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
4589 
/* Complete creation of a channel on an AMP controller once its logical
 * link @hchan is up: attach the link, send the EFS config response that
 * was waiting on it (using the identifier saved in chan->ident), and if
 * both config directions are already done bring the channel up.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		/* ERTM init failure is fatal for the channel; err is a
		 * negative errno, so negate it for the disconnect reason.
		 */
		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
4612 
/* Advance a channel move after the logical link @hchan has come up.
 * The next step depends on which move state was waiting on the link.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		/* Link was the last missing piece; proceed unless we are
		 * locally busy, in which case the move waits for that to
		 * clear first.
		 */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4646 
/* Logical link (AMP) setup completion callback.  Call with chan locked.
 *
 * A non-zero @status means the logical link could not be established:
 * the pending create or move is failed and the link references are
 * dropped.  On success, either channel creation (pre-BT_CONNECTED) or
 * a channel move (BT_CONNECTED) is completed.
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
4667 
4668 void l2cap_move_start(struct l2cap_chan *chan)
4669 {
4670 	BT_DBG("chan %p", chan);
4671 
4672 	if (chan->local_amp_id == HCI_BREDR_ID) {
4673 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4674 			return;
4675 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4676 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4677 		/* Placeholder - start physical link setup */
4678 	} else {
4679 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4680 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4681 		chan->move_id = 0;
4682 		l2cap_move_setup(chan);
4683 		l2cap_send_move_chan_req(chan, 0);
4684 	}
4685 }
4686 
/* Continue channel creation on an AMP controller once the physical
 * link attempt has completed with @result.
 *
 * Outgoing channels (BT_CONNECT) either proceed with a Create Channel
 * Request on the AMP or fall back to a plain BR/EDR Connect Request.
 * Incoming channels answer the pending Create Channel Request and, on
 * success, immediately start configuration.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* No FCS on AMP channels */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		/* NOTE(review): assumes the built config request fits in
		 * 128 bytes - confirm against l2cap_build_conf_req().
		 */
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Kick off configuration right away */
			__l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}
4738 
4739 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4740 				   u8 remote_amp_id)
4741 {
4742 	l2cap_move_setup(chan);
4743 	chan->move_id = local_amp_id;
4744 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
4745 
4746 	l2cap_send_move_chan_req(chan, remote_amp_id);
4747 }
4748 
/* As move responder, answer the pending Move Channel Request once the
 * physical link attempt has completed.  With the logical link lookup
 * still a placeholder, hchan is always NULL here and the move is
 * currently always refused with L2CAP_MR_NOT_ALLOWED.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
4773 
4774 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4775 {
4776 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4777 		u8 rsp_result;
4778 		if (result == -EINVAL)
4779 			rsp_result = L2CAP_MR_BAD_ID;
4780 		else
4781 			rsp_result = L2CAP_MR_NOT_ALLOWED;
4782 
4783 		l2cap_send_move_chan_rsp(chan, rsp_result);
4784 	}
4785 
4786 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
4787 	chan->move_state = L2CAP_MOVE_STABLE;
4788 
4789 	/* Restart data transmission */
4790 	l2cap_ertm_send(chan);
4791 }
4792 
/* Physical link (AMP) setup completion callback.  Invoke with locked
 * chan.
 *
 * Routes the result either to channel creation (channel not yet
 * connected) or to the appropriate move continuation/cancellation.
 *
 * NOTE(review): the BT_DISCONN/BT_CLOSED early return unlocks the
 * channel while every other path returns with it still locked - confirm
 * callers expect this asymmetry.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
4826 
/* Handle an incoming Move Channel Request.
 *
 * Validates the request (channel exists and is movable, destination
 * controller is a usable AMP, no lost move collision) and either
 * rejects it or becomes the move responder.  Returns 0 on success or a
 * negative errno for malformed/disallowed signalling.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!conn->hs_enabled)
		return -EINVAL;

	/* On success the channel is returned locked; it is unlocked at
	 * send_move_response below.
	 */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* No such channel - reject with a bare response */
		rsp.icid = cpu_to_le16(icid);
		rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	/* Only dynamically-allocated ERTM/streaming channels that are
	 * not pinned to BR/EDR may be moved.
	 */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	/* A non-zero destination must be an AMP controller that is up */
	if (req->dest_amp_id) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision.  Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(conn->src, conn->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (!req->dest_amp_id) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
4924 
/* Continue a move after a success or pending Move Channel Response
 * arrives for @icid.  Advances the initiator's move state machine; if
 * no channel matches, an unconfirmed confirm is sent so the remote can
 * clean up.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	/* Returned channel is locked; unlocked at the end */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	/* A pending response extends the move deadline */
	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
5014 
/* Handle a failed Move Channel Response (anything other than success
 * or pending).  On collision the initiator demotes itself to responder
 * and lets the remote's move proceed; otherwise the move is cancelled.
 * Either way, an unconfirmed confirm closes out the exchange.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	/* Returned channel is locked; unlocked at the end */
	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
5043 
5044 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5045 				  struct l2cap_cmd_hdr *cmd,
5046 				  u16 cmd_len, void *data)
5047 {
5048 	struct l2cap_move_chan_rsp *rsp = data;
5049 	u16 icid, result;
5050 
5051 	if (cmd_len != sizeof(*rsp))
5052 		return -EPROTO;
5053 
5054 	icid = le16_to_cpu(rsp->icid);
5055 	result = le16_to_cpu(rsp->result);
5056 
5057 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5058 
5059 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5060 		l2cap_move_continue(conn, icid, result);
5061 	else
5062 		l2cap_move_fail(conn, cmd->ident, icid, result);
5063 
5064 	return 0;
5065 }
5066 
/* Handle an incoming Move Channel Confirm.  If the channel was waiting
 * for this confirm, commit (or roll back) the controller switch and
 * finish the move.  A confirm response is sent in all cases, even when
 * the icid is unknown, as the spec requires.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	/* Returned channel is locked; unlocked before returning */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			/* Commit to the new controller; a move back to
			 * BR/EDR (id 0) no longer needs the logical link.
			 */
			chan->local_amp_id = chan->move_id;
			if (!chan->local_amp_id)
				__release_logical_link(chan);
		} else {
			/* Unconfirmed - stay on the current controller */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
5108 
/* Handle an incoming Move Channel Confirm Response - the final step of
 * a move we initiated.  Commits the controller switch, releasing the
 * logical link when the channel has moved back to BR/EDR.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	/* Returned channel is locked; unlocked before returning */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		/* Back on BR/EDR - the AMP logical link is no longer used */
		if (!chan->local_amp_id && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
5143 
5144 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5145 					 u16 to_multiplier)
5146 {
5147 	u16 max_latency;
5148 
5149 	if (min > max || min < 6 || max > 3200)
5150 		return -EINVAL;
5151 
5152 	if (to_multiplier < 10 || to_multiplier > 3200)
5153 		return -EINVAL;
5154 
5155 	if (max >= to_multiplier * 8)
5156 		return -EINVAL;
5157 
5158 	max_latency = (to_multiplier * 8 / max) - 1;
5159 	if (latency > 499 || latency > max_latency)
5160 		return -EINVAL;
5161 
5162 	return 0;
5163 }
5164 
/* Handle an LE Connection Parameter Update Request from the slave.
 * Only valid when we are master; the parameters are validated and,
 * if accepted, applied with hci_le_conn_update() after the response
 * is sent.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	/* Only the master may be asked to update connection parameters */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min		= __le16_to_cpu(req->min);
	max		= __le16_to_cpu(req->max);
	latency		= __le16_to_cpu(req->latency);
	to_multiplier	= __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	/* Apply the parameters only after accepting them */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
5207 
/* Dispatch one BR/EDR signaling command to its handler.
 *
 * Returns 0 on success or a negative errno; a non-zero return makes
 * the caller send an L2CAP Command Reject.  Handlers whose return
 * value is ignored report protocol problems by emitting their own
 * responses instead.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5287 
5288 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5289 				   struct l2cap_cmd_hdr *cmd, u8 *data)
5290 {
5291 	switch (cmd->code) {
5292 	case L2CAP_COMMAND_REJ:
5293 		return 0;
5294 
5295 	case L2CAP_CONN_PARAM_UPDATE_REQ:
5296 		return l2cap_conn_param_update_req(conn, cmd, data);
5297 
5298 	case L2CAP_CONN_PARAM_UPDATE_RSP:
5299 		return 0;
5300 
5301 	default:
5302 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5303 		return -EINVAL;
5304 	}
5305 }
5306 
5307 static __le16 l2cap_err_to_reason(int err)
5308 {
5309 	switch (err) {
5310 	case -EBADSLT:
5311 		return __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
5312 	case -EMSGSIZE:
5313 		return __constant_cpu_to_le16(L2CAP_REJ_MTU_EXCEEDED);
5314 	case -EINVAL:
5315 	case -EPROTO:
5316 	default:
5317 		return __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5318 	}
5319 }
5320 
/* Process an skb received on the LE signaling channel.  LE signaling
 * carries exactly one command per PDU.  Consumes @skb in all cases.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* The single command must fill the PDU exactly, and ident 0 is
	 * reserved.
	 */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		/* NOTE(review): message text says "Wrong link type" but is
		 * logged for any handler error - consider rewording.
		 */
		BT_ERR("Wrong link type (%d)", err);

		rej.reason = l2cap_err_to_reason(err);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5361 
/* Process an skb received on the BR/EDR signaling channel.  Unlike LE,
 * one PDU may contain several concatenated commands; each is parsed
 * and dispatched in turn.  Consumes @skb in all cases.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw sockets a copy of the signaling traffic */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		/* Copy the header out: data may be unaligned */
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		/* Command must fit in the remaining data; ident 0 is
		 * reserved.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			/* NOTE(review): message text says "Wrong link type"
			 * but is logged for any handler error.
			 */
			BT_ERR("Wrong link type (%d)", err);

			rej.reason = l2cap_err_to_reason(err);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		data += cmd_len;
		len  -= cmd_len;
	}

drop:
	kfree_skb(skb);
}
5410 
/* Verify and strip the trailing FCS (CRC-16) of a received ERTM or
 * streaming mode frame.  Returns 0 if the FCS matches or the channel
 * does not use FCS, -EBADMSG on a CRC mismatch.
 */
static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
{
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	/* The CRC also covers the L2CAP header, whose size depends on
	 * whether extended control fields are in use.
	 */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim the FCS off first; the bytes are still present in
		 * the buffer, so they can be read at data + len just after.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		/* skb->data has already been pulled past the header, so
		 * back up hdr_size bytes to include it in the CRC.
		 */
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
5431 
/* Answer a poll (P-bit) from the remote: send a frame with the F-bit
 * set.  Prefer RNR when locally busy, otherwise let pending I-frames
 * carry the F-bit, falling back to an RR s-frame if nothing else
 * transmitted it.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote busy just cleared: restart retransmission timing for
	 * frames still awaiting acknowledgement.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5465 
/* Append @new_frag to @skb's frag_list and account for its size.
 * @last_frag tracks the current tail of the chain (initially the head
 * skb itself) and is updated to the new tail.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	/* Keep the head skb's length/size accounting in sync */
	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5484 
/* Reassemble an SDU from segmented ERTM/streaming I-frames according
 * to the SAR bits in @control.
 *
 * Takes ownership of @skb: it is either delivered upward via
 * chan->ops->recv(), absorbed into the in-progress chan->sdu, or freed
 * on error.  Returns 0 on success (which includes "segment consumed,
 * SDU not yet complete") or a negative errno; any error also discards
 * the partially assembled SDU.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	/* err stays -EINVAL for SAR sequence violations (e.g. START
	 * while an SDU is in progress, CONTINUE/END without one).
	 */
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* First segment starts with the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start segment must not already contain the whole SDU;
		 * leaving err at -EINVAL discards it below.
		 */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb now owned by chan->sdu; don't free it on exit */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continue segment must not complete or overrun the SDU */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The end segment must bring the SDU to exactly sdu_len */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop both the offending segment (if still owned here)
		 * and any partially reassembled SDU.
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5566 
/* Re-segment queued outgoing data after a channel move changes the
 * usable PDU size.
 *
 * Placeholder - resegmentation is not implemented yet; always succeeds.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	return 0;
}
5572 
5573 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5574 {
5575 	u8 event;
5576 
5577 	if (chan->mode != L2CAP_MODE_ERTM)
5578 		return;
5579 
5580 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5581 	l2cap_tx(chan, NULL, NULL, event);
5582 }
5583 
/* Deliver I-frames held in the SREJ queue that have become sequential.
 * Frames are pulled from srej_q in buffer_seq order and passed to
 * reassembly until a gap (or local busy) stops delivery.  Once the
 * queue drains, normal receive state resumes and an ack is sent.
 * Returns 0 or the first reassembly error.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap found: the next expected frame has not arrived yet */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	/* All out-of-sequence frames recovered: leave SREJ_SENT state */
	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5617 
/* Handle a received SREJ (selective reject) S-frame, which requests
 * retransmission of the single I-frame carrying control->reqseq.  A
 * reqseq that was never sent, or whose frame already reached the
 * max_tx retry limit, is a protocol error that disconnects the
 * channel.  The P/F bits additionally drive the ERTM poll/final
 * exchange.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq == next_tx_seq would request a frame not yet sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means an unlimited number of retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* SREJ with P=1: answer with F=1, retransmit, and remember
		 * the reqseq so the later F=1 SREJ for the same frame does
		 * not trigger a second retransmission.
		 */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmission if it was already done in
			 * response to a poll SREJ for this same reqseq.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5675 
/* Handle a received REJ S-frame: the peer requests retransmission of
 * every unacked I-frame starting at control->reqseq.  A reqseq that
 * was never sent, or a first frame already at the max_tx retry limit,
 * is a protocol error that disconnects the channel.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq == next_tx_seq would reject a frame not yet sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means an unlimited number of retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* F=1 answers our poll; retransmit only if a REJ-triggered
		 * retransmission is not already in progress.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5712 
/* Classify the txseq of a received I-frame relative to the receive
 * window and the current recovery state.  The returned L2CAP_TXSEQ_*
 * value tells the RX state machines whether the frame is the next
 * expected one, a duplicate, an out-of-sequence frame that needs SREJ
 * recovery, a frame previously requested with an SREJ, or invalid
 * (either silently ignorable or requiring a disconnect, depending on
 * the tx window size - see the "double poll" note below).
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		/* Already queued in srej_q: a retransmitted copy */
		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		/* Requested via SREJ but arrived out of the order we
		 * asked for it.
		 */
		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq falls before expected_tx_seq: frame already received */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
5798 
/* RX state machine handler for the normal RECV state.  Processes
 * incoming I-frames (in-sequence data, duplicates, and sequence gaps
 * that start SREJ recovery) and S-frame events (RR/RNR/REJ/SREJ).
 * Unless the skb is queued or consumed (skb_in_use), it is freed
 * before returning.  Returns 0 or a reassembly error.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* The piggybacked reqseq still acks our frames */
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				/* F=1 answers our poll; if no REJ-triggered
				 * retransmission is pending, retransmit and
				 * resume sending.
				 */
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Data already seen; only the ack info is useful */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* Don't retransmit while a channel move is in
			 * progress or if a REJ already triggered one.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Remote just cleared its busy state: restart the
			 * retransmission timer if frames are still unacked.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		/* Remote is busy: stop retransmitting until it recovers */
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
5932 
/* RX state machine handler for the SREJ_SENT state: one or more SREJ
 * frames are outstanding and retransmitted frames are expected.  All
 * usable I-frames are parked in srej_q; the queue is drained through
 * l2cap_rx_queued_iframes() once the requested frames arrive in
 * order.  Unless the skb is queued (skb_in_use), it is freed before
 * returning.  Returns 0 or a reassembly error.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The head of the SREJ list arrived; queue it and
			 * try to drain any now-contiguous frames.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* F=1 answers our poll; retransmit unless a REJ
			 * already triggered a retransmission.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll with F=1 on the pending SREJ */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			/* Ack the RNR without abandoning SREJ recovery */
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6075 
6076 static int l2cap_finish_move(struct l2cap_chan *chan)
6077 {
6078 	BT_DBG("chan %p", chan);
6079 
6080 	chan->rx_state = L2CAP_RX_STATE_RECV;
6081 
6082 	if (chan->hs_hcon)
6083 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6084 	else
6085 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6086 
6087 	return l2cap_resegment(chan);
6088 }
6089 
/* RX state machine handler for WAIT_P: a channel move is pending and
 * we wait for the peer's poll (P=1) before resuming.  The poll's
 * reqseq tells us where the receiver expects transmission to resume,
 * so the transmit state is rewound accordingly, the move is finished,
 * and the S-frame event is replayed through the normal RECV handler.
 * I-frames are a protocol error in this state.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	/* Only a poll (P=1) is acceptable while waiting */
	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	/* Answer the poll with F=1 */
	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	/* S-frames carry no data, so skb is intentionally NULL here */
	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6127 
/* RX state machine handler for WAIT_F: after a channel move we sent a
 * poll and wait for the matching final (F=1) response.  On receipt,
 * the transmit state is rewound to the peer's reqseq, the MTU of the
 * now-active controller is adopted, pending data is re-segmented, and
 * the frame is replayed through the normal RECV handler.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	/* Only a final (F=1) response is acceptable while waiting */
	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* Use the block MTU of the AMP controller if one carries the
	 * channel now, otherwise the BR/EDR ACL MTU.
	 */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6165 
6166 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6167 {
6168 	/* Make sure reqseq is for a packet that has been sent but not acked */
6169 	u16 unacked;
6170 
6171 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6172 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6173 }
6174 
6175 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6176 		    struct sk_buff *skb, u8 event)
6177 {
6178 	int err = 0;
6179 
6180 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6181 	       control, skb, event, chan->rx_state);
6182 
6183 	if (__valid_reqseq(chan, control->reqseq)) {
6184 		switch (chan->rx_state) {
6185 		case L2CAP_RX_STATE_RECV:
6186 			err = l2cap_rx_state_recv(chan, control, skb, event);
6187 			break;
6188 		case L2CAP_RX_STATE_SREJ_SENT:
6189 			err = l2cap_rx_state_srej_sent(chan, control, skb,
6190 						       event);
6191 			break;
6192 		case L2CAP_RX_STATE_WAIT_P:
6193 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
6194 			break;
6195 		case L2CAP_RX_STATE_WAIT_F:
6196 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
6197 			break;
6198 		default:
6199 			/* shut it down */
6200 			break;
6201 		}
6202 	} else {
6203 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6204 		       control->reqseq, chan->next_tx_seq,
6205 		       chan->expected_ack_seq);
6206 		l2cap_send_disconn_req(chan, ECONNRESET);
6207 	}
6208 
6209 	return err;
6210 }
6211 
/* Receive handler for streaming mode.  There is no retransmission in
 * this mode, so only the next expected txseq is reassembled; any
 * sequence gap discards both the received frame and any partially
 * reassembled SDU, then resynchronizes to the received txseq.
 * Always returns 0.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* A frame was lost: any partial SDU can never be
		 * completed, so drop it along with this frame.
		 */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize on the received txseq */
	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return err;
}
6249 
/* Entry point for frames on an ERTM or streaming mode channel.
 * Unpacks the control field, verifies the FCS, enforces the
 * negotiated MPS, validates the F/P bits, and dispatches I-frames and
 * S-frames to the appropriate state machine.  Corrupted or invalid
 * frames are simply dropped; ERTM recovery will request
 * retransmission as needed.  Always returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Exclude the SDU length header and FCS trailer from the
	 * payload length checked against the MPS.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map L2CAP_SUPER_* values (0..3) to RX events */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6337 
/* Route a frame received on a dynamic CID (or the A2MP fixed CID) to
 * its channel and deliver it according to the channel mode.  Frames
 * for unknown CIDs or non-connected channels are dropped.
 * NOTE(review): both exit paths unlock the channel, which implies
 * l2cap_get_chan_by_scid() returns it locked - confirm against its
 * definition (it is outside this view).
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		/* The A2MP channel is created on demand for the first
		 * frame that arrives on its fixed CID.
		 */
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() takes ownership of the skb */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
6396 
/* Deliver a connectionless (G-frame) payload, addressed by PSM, to a
 * matching bound or connected channel.  Only valid on ACL links; the
 * frame is dropped if no channel matches or it exceeds the channel's
 * incoming MTU.  On success ops->recv consumes the skb.
 * NOTE(review): nothing here releases the channel returned by
 * l2cap_global_chan_by_psm() - verify whether that lookup takes a
 * reference (its definition is outside this view).
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto drop;

	chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
	if (!chan)
		goto drop;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* ops->recv takes ownership of the skb on success */
	if (!chan->ops->recv(chan, skb))
		return;

drop:
	kfree_skb(skb);
}
6424 
/* Deliver a frame received on the fixed ATT CID of an LE link to the
 * matching connected channel.  Dropped if the link is not LE, no
 * channel matches, or the frame exceeds the incoming MTU.  On success
 * ops->recv consumes the skb.
 * NOTE(review): as with the connectionless path, no put/unref of the
 * channel returned by l2cap_global_chan_by_scid() happens here -
 * verify whether that lookup takes a reference.
 */
static void l2cap_att_channel(struct l2cap_conn *conn,
			      struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != LE_LINK)
		goto drop;

	chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
					 conn->src, conn->dst);
	if (!chan)
		goto drop;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->imtu < skb->len)
		goto drop;

	/* ops->recv takes ownership of the skb on success */
	if (!chan->ops->recv(chan, skb))
		return;

drop:
	kfree_skb(skb);
}
6450 
6451 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6452 {
6453 	struct l2cap_hdr *lh = (void *) skb->data;
6454 	u16 cid, len;
6455 	__le16 psm;
6456 
6457 	skb_pull(skb, L2CAP_HDR_SIZE);
6458 	cid = __le16_to_cpu(lh->cid);
6459 	len = __le16_to_cpu(lh->len);
6460 
6461 	if (len != skb->len) {
6462 		kfree_skb(skb);
6463 		return;
6464 	}
6465 
6466 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
6467 
6468 	switch (cid) {
6469 	case L2CAP_CID_SIGNALING:
6470 		l2cap_sig_channel(conn, skb);
6471 		break;
6472 
6473 	case L2CAP_CID_CONN_LESS:
6474 		psm = get_unaligned((__le16 *) skb->data);
6475 		skb_pull(skb, L2CAP_PSMLEN_SIZE);
6476 		l2cap_conless_channel(conn, psm, skb);
6477 		break;
6478 
6479 	case L2CAP_CID_ATT:
6480 		l2cap_att_channel(conn, skb);
6481 		break;
6482 
6483 	case L2CAP_CID_LE_SIGNALING:
6484 		l2cap_le_sig_channel(conn, skb);
6485 		break;
6486 
6487 	case L2CAP_CID_SMP:
6488 		if (smp_sig_channel(conn, skb))
6489 			l2cap_conn_del(conn->hcon, EACCES);
6490 		break;
6491 
6492 	default:
6493 		l2cap_data_channel(conn, cid, skb);
6494 		break;
6495 	}
6496 }
6497 
6498 /* ---- L2CAP interface with lower layer (HCI) ---- */
6499 
6500 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
6501 {
6502 	int exact = 0, lm1 = 0, lm2 = 0;
6503 	struct l2cap_chan *c;
6504 
6505 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
6506 
6507 	/* Find listening sockets and check their link_mode */
6508 	read_lock(&chan_list_lock);
6509 	list_for_each_entry(c, &chan_list, global_l) {
6510 		struct sock *sk = c->sk;
6511 
6512 		if (c->state != BT_LISTEN)
6513 			continue;
6514 
6515 		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
6516 			lm1 |= HCI_LM_ACCEPT;
6517 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6518 				lm1 |= HCI_LM_MASTER;
6519 			exact++;
6520 		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
6521 			lm2 |= HCI_LM_ACCEPT;
6522 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6523 				lm2 |= HCI_LM_MASTER;
6524 		}
6525 	}
6526 	read_unlock(&chan_list_lock);
6527 
6528 	return exact ? lm1 : lm2;
6529 }
6530 
6531 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6532 {
6533 	struct l2cap_conn *conn;
6534 
6535 	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6536 
6537 	if (!status) {
6538 		conn = l2cap_conn_add(hcon);
6539 		if (conn)
6540 			l2cap_conn_ready(conn);
6541 	} else {
6542 		l2cap_conn_del(hcon, bt_to_errno(status));
6543 	}
6544 }
6545 
6546 int l2cap_disconn_ind(struct hci_conn *hcon)
6547 {
6548 	struct l2cap_conn *conn = hcon->l2cap_data;
6549 
6550 	BT_DBG("hcon %p", hcon);
6551 
6552 	if (!conn)
6553 		return HCI_ERROR_REMOTE_USER_TERM;
6554 	return conn->disc_reason;
6555 }
6556 
/* HCI callback for a completed disconnection: release all L2CAP state
 * attached to the link, translating the HCI reason into an errno for
 * the upper layers.
 */
void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
6563 
6564 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6565 {
6566 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6567 		return;
6568 
6569 	if (encrypt == 0x00) {
6570 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
6571 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6572 		} else if (chan->sec_level == BT_SECURITY_HIGH)
6573 			l2cap_chan_close(chan, ECONNREFUSED);
6574 	} else {
6575 		if (chan->sec_level == BT_SECURITY_MEDIUM)
6576 			__clear_chan_timer(chan);
6577 	}
6578 }
6579 
/* HCI callback for a completed authentication/encryption change on a
 * link.  Walks every channel on the connection and advances its state
 * machine: LE/ATT channels become ready, established channels get
 * their encryption timers updated, channels waiting on security
 * either continue connecting or get torn down, and incoming channels
 * in CONNECT2 are answered with a connect response.  Always returns 0.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	if (hcon->type == LE_LINK) {
		/* SMP key distribution starts once the LE link encrypts */
		if (!status && encrypt)
			smp_distribute_keys(conn, 0);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP channels are not affected by link security */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* The fixed ATT channel becomes ready once encrypted */
		if (chan->scid == L2CAP_CID_ATT) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* Skip channels with a connect request still pending */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			struct sock *sk = chan->sk;

			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
			sk->sk_state_change(sk);

			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Security completed for an outgoing channel:
			 * proceed with the connect request, or time the
			 * channel out on failure.
			 */
			if (!status) {
				l2cap_start_connection(chan);
			} else {
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
			}
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming channel waiting on security: send the
			 * deferred connect response now.
			 */
			struct sock *sk = chan->sk;
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			lock_sock(sk);

			if (!status) {
				if (test_bit(BT_SK_DEFER_SETUP,
					     &bt_sk(sk)->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					__l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				__l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			release_sock(sk);

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Kick off configuration for accepted channels */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
6695 
/* Entry point for ACL data handed up by the HCI layer.  Reassembles L2CAP
 * PDUs that arrive fragmented across several ACL packets and passes each
 * complete frame to l2cap_recv_frame().  Always returns 0; @skb is either
 * handed on (ownership transferred) or freed at the "drop" label below.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	/* Lazily create the L2CAP connection on first BR/EDR data. */
	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A new start frame while reassembly is in progress means the
		 * previous PDU was truncated: discard the partial buffer and
		 * mark the connection unreliable.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Total PDU length = payload length from the header plus the
		 * Basic L2CAP header itself.
		 */
		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received; l2cap_recv_frame() takes
			 * ownership of skb, so do not fall through to drop.
			 */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		/* Copy this first fragment in; rx_len tracks the bytes still
		 * expected from subsequent ACL_CONT fragments.
		 */
		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
		/* break falls through to "drop", freeing the fragment skb —
		 * intentional: its payload was copied into rx_skb above.
		 */
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation without a pending start frame: protocol error. */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Fragment would overflow the reassembly buffer: abandon the
		 * whole PDU.
		 */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received; rx_skb ownership passes to
			 * l2cap_recv_frame().
			 */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
		break;
	}

drop:
	/* Reached on errors and after copying fragments; frees the incoming
	 * ACL skb in both cases.
	 */
	kfree_skb(skb);
	return 0;
}
6796 
6797 static int l2cap_debugfs_show(struct seq_file *f, void *p)
6798 {
6799 	struct l2cap_chan *c;
6800 
6801 	read_lock(&chan_list_lock);
6802 
6803 	list_for_each_entry(c, &chan_list, global_l) {
6804 		struct sock *sk = c->sk;
6805 
6806 		seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6807 			   &bt_sk(sk)->src, &bt_sk(sk)->dst,
6808 			   c->state, __le16_to_cpu(c->psm),
6809 			   c->scid, c->dcid, c->imtu, c->omtu,
6810 			   c->sec_level, c->mode);
6811 	}
6812 
6813 	read_unlock(&chan_list_lock);
6814 
6815 	return 0;
6816 }
6817 
6818 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
6819 {
6820 	return single_open(file, l2cap_debugfs_show, inode->i_private);
6821 }
6822 
/* File operations for the L2CAP debugfs entry; read/llseek/release are
 * the stock seq_file "single" helpers paired with l2cap_debugfs_open().
 */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
6829 
/* Dentry of the "l2cap" debugfs file created in l2cap_init(); NULL when
 * debugfs is unavailable or creation failed.
 */
static struct dentry *l2cap_debugfs;
6831 
6832 int __init l2cap_init(void)
6833 {
6834 	int err;
6835 
6836 	err = l2cap_init_sockets();
6837 	if (err < 0)
6838 		return err;
6839 
6840 	if (bt_debugfs) {
6841 		l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6842 						    NULL, &l2cap_debugfs_fops);
6843 		if (!l2cap_debugfs)
6844 			BT_ERR("Failed to create L2CAP debug file");
6845 	}
6846 
6847 	return 0;
6848 }
6849 
/* Subsystem teardown, mirroring l2cap_init(): remove the debugfs entry
 * (debugfs_remove() tolerates a NULL dentry) and unregister the L2CAP
 * socket layer.
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
6855 
/* Runtime-writable (mode 0644) module parameter for the disable_ertm
 * flag declared near the top of this file.
 */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
6858