xref: /linux/net/bluetooth/l2cap_core.c (revision 2d87650a3bf1b80f7d0d150ee1af3f8a89e5b7aa)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 
40 #include "smp.h"
41 #include "a2mp.h"
42 #include "amp.h"
43 
44 bool disable_ertm;
45 
46 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
47 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
48 
49 static LIST_HEAD(chan_list);
50 static DEFINE_RWLOCK(chan_list_lock);
51 
52 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
53 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
54 
55 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
56 				       u8 code, u8 ident, u16 dlen, void *data);
57 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
58 			   void *data);
59 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
60 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
61 
62 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
63 		     struct sk_buff_head *skbs, u8 event);
64 
65 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
66 {
67 	if (hcon->type == LE_LINK) {
68 		if (type == ADDR_LE_DEV_PUBLIC)
69 			return BDADDR_LE_PUBLIC;
70 		else
71 			return BDADDR_LE_RANDOM;
72 	}
73 
74 	return BDADDR_BREDR;
75 }
76 
77 /* ---- L2CAP channels ---- */
78 
79 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
80 						   u16 cid)
81 {
82 	struct l2cap_chan *c;
83 
84 	list_for_each_entry(c, &conn->chan_l, list) {
85 		if (c->dcid == cid)
86 			return c;
87 	}
88 	return NULL;
89 }
90 
91 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
92 						   u16 cid)
93 {
94 	struct l2cap_chan *c;
95 
96 	list_for_each_entry(c, &conn->chan_l, list) {
97 		if (c->scid == cid)
98 			return c;
99 	}
100 	return NULL;
101 }
102 
103 /* Find channel with given SCID.
104  * Returns locked channel. */
105 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
106 						 u16 cid)
107 {
108 	struct l2cap_chan *c;
109 
110 	mutex_lock(&conn->chan_lock);
111 	c = __l2cap_get_chan_by_scid(conn, cid);
112 	if (c)
113 		l2cap_chan_lock(c);
114 	mutex_unlock(&conn->chan_lock);
115 
116 	return c;
117 }
118 
119 /* Find channel with given DCID.
120  * Returns locked channel.
121  */
122 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
123 						 u16 cid)
124 {
125 	struct l2cap_chan *c;
126 
127 	mutex_lock(&conn->chan_lock);
128 	c = __l2cap_get_chan_by_dcid(conn, cid);
129 	if (c)
130 		l2cap_chan_lock(c);
131 	mutex_unlock(&conn->chan_lock);
132 
133 	return c;
134 }
135 
136 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
137 						    u8 ident)
138 {
139 	struct l2cap_chan *c;
140 
141 	list_for_each_entry(c, &conn->chan_l, list) {
142 		if (c->ident == ident)
143 			return c;
144 	}
145 	return NULL;
146 }
147 
148 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
149 						  u8 ident)
150 {
151 	struct l2cap_chan *c;
152 
153 	mutex_lock(&conn->chan_lock);
154 	c = __l2cap_get_chan_by_ident(conn, ident);
155 	if (c)
156 		l2cap_chan_lock(c);
157 	mutex_unlock(&conn->chan_lock);
158 
159 	return c;
160 }
161 
162 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
163 {
164 	struct l2cap_chan *c;
165 
166 	list_for_each_entry(c, &chan_list, global_l) {
167 		if (c->sport == psm && !bacmp(&c->src, src))
168 			return c;
169 	}
170 	return NULL;
171 }
172 
173 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
174 {
175 	int err;
176 
177 	write_lock(&chan_list_lock);
178 
179 	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
180 		err = -EADDRINUSE;
181 		goto done;
182 	}
183 
184 	if (psm) {
185 		chan->psm = psm;
186 		chan->sport = psm;
187 		err = 0;
188 	} else {
189 		u16 p;
190 
191 		err = -EINVAL;
192 		for (p = 0x1001; p < 0x1100; p += 2)
193 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
194 				chan->psm   = cpu_to_le16(p);
195 				chan->sport = cpu_to_le16(p);
196 				err = 0;
197 				break;
198 			}
199 	}
200 
201 done:
202 	write_unlock(&chan_list_lock);
203 	return err;
204 }
205 
206 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
207 {
208 	write_lock(&chan_list_lock);
209 
210 	chan->scid = scid;
211 
212 	write_unlock(&chan_list_lock);
213 
214 	return 0;
215 }
216 
217 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
218 {
219 	u16 cid, dyn_end;
220 
221 	if (conn->hcon->type == LE_LINK)
222 		dyn_end = L2CAP_CID_LE_DYN_END;
223 	else
224 		dyn_end = L2CAP_CID_DYN_END;
225 
226 	for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
227 		if (!__l2cap_get_chan_by_scid(conn, cid))
228 			return cid;
229 	}
230 
231 	return 0;
232 }
233 
234 static void l2cap_state_change(struct l2cap_chan *chan, int state)
235 {
236 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
237 	       state_to_string(state));
238 
239 	chan->state = state;
240 	chan->ops->state_change(chan, state, 0);
241 }
242 
243 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
244 						int state, int err)
245 {
246 	chan->state = state;
247 	chan->ops->state_change(chan, chan->state, err);
248 }
249 
/* Report error @err to the channel owner without changing the state */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
254 
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	/* Only (re)arm the retransmission timer when the monitor timer
	 * is not already running and a retransmission timeout has been
	 * configured.
	 */
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
263 
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	/* The monitor timer supersedes the retransmission timer */
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
272 
/* Find the skb carrying ERTM tx sequence number @seq in @head,
 * or NULL if no queued frame has that sequence number.
 */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->control.txseq == seq)
			return skb;
	}

	return NULL;
}
285 
286 /* ---- L2CAP sequence number lists ---- */
287 
288 /* For ERTM, ordered lists of sequence numbers must be tracked for
289  * SREJ requests that are received and for frames that are to be
290  * retransmitted. These seq_list functions implement a singly-linked
291  * list in an array, where membership in the list can also be checked
292  * in constant time. Items can also be added to the tail of the list
293  * and removed from the head in constant time, without further memory
294  * allocs or frees.
295  */
296 
297 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
298 {
299 	size_t alloc_size, i;
300 
301 	/* Allocated size is a power of 2 to map sequence numbers
302 	 * (which may be up to 14 bits) in to a smaller array that is
303 	 * sized for the negotiated ERTM transmit windows.
304 	 */
305 	alloc_size = roundup_pow_of_two(size);
306 
307 	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
308 	if (!seq_list->list)
309 		return -ENOMEM;
310 
311 	seq_list->mask = alloc_size - 1;
312 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
313 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
314 	for (i = 0; i < alloc_size; i++)
315 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
316 
317 	return 0;
318 }
319 
/* Release the array backing @seq_list; the list must be reinitialized
 * with l2cap_seq_list_init() before any further use.
 */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
324 
/* Return true if @seq is currently linked into @seq_list */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
331 
/* Remove @seq from @seq_list.
 *
 * Returns @seq on success, or L2CAP_SEQ_LIST_CLEAR when the list is
 * empty or @seq is not a member. Removing the head is O(1); any other
 * element requires walking the chain to find its predecessor.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed in constant time */
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* Removed the last element: reset to the empty state */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Walk the list to find the sequence number */
		u16 prev = seq_list->head;
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL)
				return L2CAP_SEQ_LIST_CLEAR;
		}

		/* Unlink the number from the list and clear it */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
365 
/* Remove and return the first (oldest) sequence number in @seq_list,
 * or L2CAP_SEQ_LIST_CLEAR when the list is empty.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	/* Remove the head in constant time */
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
371 
/* Reset @seq_list to the empty state, clearing every array slot */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	/* Already empty: nothing to clear */
	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
385 
/* Append @seq to the tail of @seq_list in constant time.
 * A sequence number already in the list is silently ignored.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* Already a member: keep the existing position */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
403 
/* Channel timer expiry: close the channel with a state-dependent error.
 *
 * Runs from the workqueue and takes conn->chan_lock before the channel
 * lock, matching the locking order used by the lookup helpers above.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* Map the state at expiry onto the error reported to the owner */
	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	/* NOTE(review): presumably balances a reference taken when the
	 * timer was armed (l2cap_set_timer) — confirm.
	 */
	l2cap_chan_put(chan);
}
433 
/* Allocate a new channel, link it into the global channel list and
 * hand the initial kref to the caller. Returns NULL on allocation
 * failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	/* NOTE(review): GFP_ATOMIC here — confirm whether callers are
	 * really in atomic context or a sleeping allocation would do.
	 */
	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
461 
462 static void l2cap_chan_destroy(struct kref *kref)
463 {
464 	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
465 
466 	BT_DBG("chan %p", chan);
467 
468 	write_lock(&chan_list_lock);
469 	list_del(&chan->global_l);
470 	write_unlock(&chan_list_lock);
471 
472 	kfree(chan);
473 }
474 
/* Take a reference on @c (logs the refcount prior to the get) */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}
481 
/* Drop a reference on @c; the last put frees the channel via
 * l2cap_chan_destroy().
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
488 
489 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
490 {
491 	chan->fcs  = L2CAP_FCS_CRC16;
492 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
493 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
494 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
495 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
496 	chan->sec_level = BT_SECURITY_LOW;
497 
498 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
499 }
500 
501 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
502 {
503 	chan->sdu = NULL;
504 	chan->sdu_last_frag = NULL;
505 	chan->sdu_len = 0;
506 	chan->tx_credits = 0;
507 	chan->rx_credits = le_max_credits;
508 	chan->mps = min_t(u16, chan->imtu, L2CAP_LE_DEFAULT_MPS);
509 
510 	skb_queue_head_init(&chan->tx_q);
511 }
512 
/* Attach @chan to @conn: assign source/destination CIDs and default
 * MTUs according to the channel type, then link it into the
 * connection's channel list. Caller must hold conn->chan_lock.
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	/* Assume remote-initiated termination until told otherwise */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		if (conn->hcon->type == LE_LINK) {
			if (chan->dcid == L2CAP_CID_ATT) {
				/* ATT uses its fixed CID on both ends */
				chan->omtu = L2CAP_DEFAULT_MTU;
				chan->scid = L2CAP_CID_ATT;
			} else {
				chan->scid = l2cap_alloc_cid(conn);
			}
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_FIX_A2MP:
		chan->scid = L2CAP_CID_A2MP;
		chan->dcid = L2CAP_CID_A2MP;
		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Best-effort defaults for the extended flow specification */
	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* The list holds a channel reference and pins the HCI link;
	 * both are released in l2cap_chan_del().
	 */
	l2cap_chan_hold(chan);

	hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
572 
/* Attach @chan to @conn under the connection's channel-list lock */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
579 
/* Detach @chan from its connection (if any) and release mode-specific
 * resources, reporting @err to the owner through ops->teardown.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drops the reference taken in __l2cap_chan_add() */
		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* NOTE(review): A2MP channels skip the hci_conn drop —
		 * presumably their link reference is managed elsewhere;
		 * confirm it balances the hold in __l2cap_chan_add().
		 */
		if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	chan->ops->teardown(chan, err);

	/* CONF_NOT_COMPLETE is cleared in l2cap_chan_ready() (see
	 * l2cap_chan_create()); skip the mode-specific purge for
	 * channels that never finished configuration.
	 */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch(chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);

		/* fall through */

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}

	return;
}
643 
644 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
645 {
646 	struct l2cap_conn *conn = chan->conn;
647 	struct l2cap_le_conn_rsp rsp;
648 	u16 result;
649 
650 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
651 		result = L2CAP_CR_AUTHORIZATION;
652 	else
653 		result = L2CAP_CR_BAD_PSM;
654 
655 	l2cap_state_change(chan, BT_DISCONN);
656 
657 	rsp.dcid    = cpu_to_le16(chan->scid);
658 	rsp.mtu     = cpu_to_le16(chan->imtu);
659 	rsp.mps     = cpu_to_le16(chan->mps);
660 	rsp.credits = cpu_to_le16(chan->rx_credits);
661 	rsp.result  = cpu_to_le16(result);
662 
663 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
664 		       &rsp);
665 }
666 
667 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
668 {
669 	struct l2cap_conn *conn = chan->conn;
670 	struct l2cap_conn_rsp rsp;
671 	u16 result;
672 
673 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
674 		result = L2CAP_CR_SEC_BLOCK;
675 	else
676 		result = L2CAP_CR_BAD_PSM;
677 
678 	l2cap_state_change(chan, BT_DISCONN);
679 
680 	rsp.scid   = cpu_to_le16(chan->dcid);
681 	rsp.dcid   = cpu_to_le16(chan->scid);
682 	rsp.result = cpu_to_le16(result);
683 	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
684 
685 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
686 }
687 
/* Close @chan for @reason, acting according to its current state.
 * Callers (e.g. l2cap_chan_timeout()) invoke this with the channel
 * locked.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		/* ATT uses L2CAP_CHAN_CONN_ORIENTED so we must also
		 * check for chan->psm.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && chan->psm) {
			/* Established dynamic channel: start the
			 * disconnect handshake and let the timer reap
			 * the channel if no response arrives.
			 */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* Incoming connection pending: reject it explicitly */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK)
				l2cap_chan_le_connect_reject(chan);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
732 
/* Derive the HCI authentication requirement for @chan from its channel
 * type, PSM and requested security level.
 *
 * Side effect: for the SDP and 3DSP PSMs a BT_SECURITY_LOW request is
 * raised to BT_SECURITY_SDP.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* Non-SDP connection-oriented channels use the general
		 * bonding rules below.
		 */
		/* fall through */
	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
779 
/* Service level security */
int l2cap_chan_check_security(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	/* LE links delegate security entirely to SMP */
	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	/* Must run before sec_level is read below: l2cap_get_auth_type()
	 * may raise chan->sec_level for SDP/3DSP PSMs.
	 */
	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
}
793 
/* Return the next signalling command identifier for @conn,
 * cycling through 1..128 (0 is never produced).
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock(&conn->lock);

	/* Wrap from 128 back to 1 */
	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock(&conn->lock);

	return id;
}
815 
816 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
817 			   void *data)
818 {
819 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
820 	u8 flags;
821 
822 	BT_DBG("code 0x%2.2x", code);
823 
824 	if (!skb)
825 		return;
826 
827 	if (lmp_no_flush_capable(conn->hcon->hdev))
828 		flags = ACL_START_NO_FLUSH;
829 	else
830 		flags = ACL_START;
831 
832 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
833 	skb->priority = HCI_PRIO_MAX;
834 
835 	hci_send_acl(conn->hchan, skb, flags);
836 }
837 
838 static bool __chan_is_moving(struct l2cap_chan *chan)
839 {
840 	return chan->move_state != L2CAP_MOVE_STABLE &&
841 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
842 }
843 
/* Transmit @skb on the channel's current link.
 *
 * If the channel has a high-speed (AMP) connection and no move is in
 * progress, the frame is sent on the HS hci_chan — or dropped when
 * that channel is gone. Otherwise it goes out on the regular link,
 * unflushable only when the controller supports it and the channel
 * did not request flushable traffic.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb);

		return;
	}

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
870 
/* Decode a 16-bit enhanced control field @enh into @control.
 * Fields that do not exist for the detected frame type are zeroed.
 */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
894 
/* Decode a 32-bit extended control field @ext into @control.
 * Fields that do not exist for the detected frame type are zeroed.
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
918 
919 static inline void __unpack_control(struct l2cap_chan *chan,
920 				    struct sk_buff *skb)
921 {
922 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
923 		__unpack_extended_control(get_unaligned_le32(skb->data),
924 					  &bt_cb(skb)->control);
925 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
926 	} else {
927 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
928 					  &bt_cb(skb)->control);
929 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
930 	}
931 }
932 
/* Encode @control as a 32-bit extended control field */
static u32 __pack_extended_control(struct l2cap_ctrl *control)
{
	u32 packed;

	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
951 
/* Encode @control as a 16-bit enhanced control field */
static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
{
	u16 packed;

	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
		packed |= L2CAP_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
970 
971 static inline void __pack_control(struct l2cap_chan *chan,
972 				  struct l2cap_ctrl *control,
973 				  struct sk_buff *skb)
974 {
975 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
976 		put_unaligned_le32(__pack_extended_control(control),
977 				   skb->data + L2CAP_HDR_SIZE);
978 	} else {
979 		put_unaligned_le16(__pack_enhanced_control(control),
980 				   skb->data + L2CAP_HDR_SIZE);
981 	}
982 }
983 
984 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
985 {
986 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
987 		return L2CAP_EXT_HDR_SIZE;
988 	else
989 		return L2CAP_ENH_HDR_SIZE;
990 }
991 
/* Build a payload-less S-frame PDU carrying the pre-packed @control
 * field: basic header, control field and (when CRC16 is in use) the
 * FCS. Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	/* Length covers the control field (+ FCS); there is no payload */
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS is computed over everything written so far */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1024 
/* Build and transmit an S-frame described by @control, updating the
 * channel's conn_state bookkeeping (F-bit, RNR-sent, last acked
 * sequence) on the way out. Suppressed while a channel move is in
 * progress.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	/* A pending F-bit is carried on the next non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	/* Track whether our last report to the peer was RR or RNR */
	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* Any frame but SREJ acknowledges up to reqseq, so the pending
	 * ack timer can be cancelled.
	 */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1065 
1066 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1067 {
1068 	struct l2cap_ctrl control;
1069 
1070 	BT_DBG("chan %p, poll %d", chan, poll);
1071 
1072 	memset(&control, 0, sizeof(control));
1073 	control.sframe = 1;
1074 	control.poll = poll;
1075 
1076 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1077 		control.super = L2CAP_SUPER_RNR;
1078 	else
1079 		control.super = L2CAP_SUPER_RR;
1080 
1081 	control.reqseq = chan->buffer_seq;
1082 	l2cap_send_sframe(chan, &control);
1083 }
1084 
1085 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1086 {
1087 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1088 }
1089 
/* Decide whether @chan may be placed on an AMP controller: high speed
 * must be enabled for the connection, the peer must advertise the A2MP
 * fixed channel, at least one non-BR/EDR AMP controller must be up,
 * and the channel policy must prefer AMP.
 */
static bool __amp_capable(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct hci_dev *hdev;
	bool amp_available = false;

	if (!conn->hs_enabled)
		return false;

	if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
		return false;

	/* Look for any powered-up AMP controller */
	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (hdev->amp_type != AMP_TYPE_BREDR &&
		    test_bit(HCI_UP, &hdev->flags)) {
			amp_available = true;
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
		return amp_available;

	return false;
}
1117 
1118 static bool l2cap_check_efs(struct l2cap_chan *chan)
1119 {
1120 	/* Check EFS parameters */
1121 	return true;
1122 }
1123 
1124 void l2cap_send_conn_req(struct l2cap_chan *chan)
1125 {
1126 	struct l2cap_conn *conn = chan->conn;
1127 	struct l2cap_conn_req req;
1128 
1129 	req.scid = cpu_to_le16(chan->scid);
1130 	req.psm  = chan->psm;
1131 
1132 	chan->ident = l2cap_get_ident(conn);
1133 
1134 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1135 
1136 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1137 }
1138 
1139 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1140 {
1141 	struct l2cap_create_chan_req req;
1142 	req.scid = cpu_to_le16(chan->scid);
1143 	req.psm  = chan->psm;
1144 	req.amp_id = amp_id;
1145 
1146 	chan->ident = l2cap_get_ident(chan->conn);
1147 
1148 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1149 		       sizeof(req), &req);
1150 }
1151 
/* Prepare an ERTM channel for a move to another controller.
 *
 * Stops all ERTM timers, rewinds retry accounting on already-sent
 * frames, clears the reject/selective-reject state and switches the
 * transmit and receive state machines into their "move" states.
 * Non-ERTM channels need no preparation.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Frames at the head of tx_q that were already transmitted
	 * (retries != 0) are reset to a single try; the walk stops at
	 * the first unsent frame, since everything after it is unsent
	 * as well.
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->control.retries)
			bt_cb(skb)->control.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Hold off transmissions until the move completes */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1186 
1187 static void l2cap_move_done(struct l2cap_chan *chan)
1188 {
1189 	u8 move_role = chan->move_role;
1190 	BT_DBG("chan %p", chan);
1191 
1192 	chan->move_state = L2CAP_MOVE_STABLE;
1193 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1194 
1195 	if (chan->mode != L2CAP_MODE_ERTM)
1196 		return;
1197 
1198 	switch (move_role) {
1199 	case L2CAP_MOVE_ROLE_INITIATOR:
1200 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1201 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1202 		break;
1203 	case L2CAP_MOVE_ROLE_RESPONDER:
1204 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1205 		break;
1206 	}
1207 }
1208 
/* Transition @chan into BT_CONNECTED and notify its owner via
 * ops->ready().  Clearing conf_state also clears CONF_NOT_COMPLETE,
 * so configuration is considered finished from this point.  LE
 * flow-control channels with no transmit credits are suspended until
 * credits arrive.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
		chan->ops->suspend(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1222 
1223 static void l2cap_le_connect(struct l2cap_chan *chan)
1224 {
1225 	struct l2cap_conn *conn = chan->conn;
1226 	struct l2cap_le_conn_req req;
1227 
1228 	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1229 		return;
1230 
1231 	req.psm     = chan->psm;
1232 	req.scid    = cpu_to_le16(chan->scid);
1233 	req.mtu     = cpu_to_le16(chan->imtu);
1234 	req.mps     = cpu_to_le16(chan->mps);
1235 	req.credits = cpu_to_le16(chan->rx_credits);
1236 
1237 	chan->ident = l2cap_get_ident(conn);
1238 
1239 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1240 		       sizeof(req), &req);
1241 }
1242 
1243 static void l2cap_le_start(struct l2cap_chan *chan)
1244 {
1245 	struct l2cap_conn *conn = chan->conn;
1246 
1247 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1248 		return;
1249 
1250 	if (!chan->psm) {
1251 		l2cap_chan_ready(chan);
1252 		return;
1253 	}
1254 
1255 	if (chan->state == BT_CONNECT)
1256 		l2cap_le_connect(chan);
1257 }
1258 
1259 static void l2cap_start_connection(struct l2cap_chan *chan)
1260 {
1261 	if (__amp_capable(chan)) {
1262 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1263 		a2mp_discover_amp(chan);
1264 	} else if (chan->conn->hcon->type == LE_LINK) {
1265 		l2cap_le_start(chan);
1266 	} else {
1267 		l2cap_send_conn_req(chan);
1268 	}
1269 }
1270 
/* Begin establishing @chan on its connection.
 *
 * LE links go straight to the LE start path.  On BR/EDR the remote's
 * feature mask must be known first: if the information request was
 * already answered (and security allows, with no connect request
 * pending) the connection proceeds; otherwise an information request
 * is sent and establishment resumes from l2cap_conn_start() when the
 * response, or its timeout, arrives.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
		    __l2cap_no_conn_pending(chan)) {
			l2cap_start_connection(chan);
		}
	} else {
		struct l2cap_info_req req;
		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* l2cap_info_timeout() fires if no response arrives */
		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(req), &req);
	}
}
1301 
1302 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1303 {
1304 	u32 local_feat_mask = l2cap_feat_mask;
1305 	if (!disable_ertm)
1306 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1307 
1308 	switch (mode) {
1309 	case L2CAP_MODE_ERTM:
1310 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1311 	case L2CAP_MODE_STREAMING:
1312 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1313 	default:
1314 		return 0x00;
1315 	}
1316 }
1317 
/* Tear down @chan by sending a Disconnection Request to the peer.
 *
 * ERTM timers are stopped first so no retransmission races with the
 * disconnect.  A2MP fixed channels have no peer CID to disconnect and
 * only change state.  @err is reported to the channel owner through
 * the BT_DISCONN state change.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1344 
1345 /* ---- L2CAP connections ---- */
/* Drive channel establishment for every connection-oriented channel
 * on @conn, typically after feature discovery or a security change.
 *
 * Channels in BT_CONNECT are started (or closed with ECONNRESET when
 * their mode is unsupported and the device mandates it); incoming
 * channels in BT_CONNECT2 are answered with a connect response based
 * on the current security state, followed by our configuration
 * request when the connection was accepted.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	/* _safe iteration: the close path below may remove entries */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Wait while security is pending or a connect
			 * request is already outstanding.
			 */
			if (!l2cap_chan_check_security(chan) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Configuration starts only once, and only after
			 * a successful connect response.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1421 
/* Find channel with cid and source/destination bdaddr.
 * Returns the closest match; chan_list_lock is only held during the
 * lookup, so the returned channel is not locked.
 */
1425 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1426 						    bdaddr_t *src,
1427 						    bdaddr_t *dst)
1428 {
1429 	struct l2cap_chan *c, *c1 = NULL;
1430 
1431 	read_lock(&chan_list_lock);
1432 
1433 	list_for_each_entry(c, &chan_list, global_l) {
1434 		if (state && c->state != state)
1435 			continue;
1436 
1437 		if (c->scid == cid) {
1438 			int src_match, dst_match;
1439 			int src_any, dst_any;
1440 
1441 			/* Exact match. */
1442 			src_match = !bacmp(&c->src, src);
1443 			dst_match = !bacmp(&c->dst, dst);
1444 			if (src_match && dst_match) {
1445 				read_unlock(&chan_list_lock);
1446 				return c;
1447 			}
1448 
1449 			/* Closest match */
1450 			src_any = !bacmp(&c->src, BDADDR_ANY);
1451 			dst_any = !bacmp(&c->dst, BDADDR_ANY);
1452 			if ((src_match && dst_any) || (src_any && dst_match) ||
1453 			    (src_any && dst_any))
1454 				c1 = c;
1455 		}
1456 	}
1457 
1458 	read_unlock(&chan_list_lock);
1459 
1460 	return c1;
1461 }
1462 
/* An LE link came up: if a channel is listening on the ATT fixed CID
 * for this address pair, spawn a server-side ATT channel on @conn.
 *
 * Nothing is created when no listener exists, when a client ATT
 * channel is already present on this connection, or when the remote
 * device is blacklisted.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan, *pchan;
	u8 dst_type;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
					  &hcon->src, &hcon->dst);
	if (!pchan)
		return;

	/* Client ATT sockets should override the server one */
	if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
		return;

	dst_type = bdaddr_type(hcon, hcon->dst_type);

	/* If device is blocked, do not create a channel for it */
	if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
		return;

	l2cap_chan_lock(pchan);

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	chan->dcid = L2CAP_CID_ATT;

	/* Inherit the address pair from the underlying HCI connection */
	bacpy(&chan->src, &hcon->src);
	bacpy(&chan->dst, &hcon->dst);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);
	chan->dst_type = dst_type;

	__l2cap_chan_add(conn, chan);

clean:
	l2cap_chan_unlock(pchan);
}
1505 
/* The underlying HCI link is established: move every channel on @conn
 * forward.  LE links additionally get their ATT fixed channel set up
 * and, for outgoing links, security elevated to cover pairing
 * requests that have no associated socket.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out && hcon->type == LE_LINK)
		smp_conn_security(hcon, hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP fixed channels are skipped here */
		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Non-connection-oriented channels need no setup */
			l2cap_chan_ready(chan);

		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1547 
/* Notify sockets that we can no longer guarantee reliability */
1549 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1550 {
1551 	struct l2cap_chan *chan;
1552 
1553 	BT_DBG("conn %p", conn);
1554 
1555 	mutex_lock(&conn->chan_lock);
1556 
1557 	list_for_each_entry(chan, &conn->chan_l, list) {
1558 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1559 			l2cap_chan_set_err(chan, err);
1560 	}
1561 
1562 	mutex_unlock(&conn->chan_lock);
1563 }
1564 
/* The information request timed out: mark feature discovery as
 * complete anyway so channels waiting in l2cap_do_start() can
 * proceed via l2cap_conn_start().
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1575 
1576 /*
1577  * l2cap_user
1578  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1579  * callback is called during registration. The ->remove callback is called
1580  * during unregistration.
1581  * An l2cap_user object can either be explicitly unregistered or when the
1582  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1583  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1584  * External modules must own a reference to the l2cap_conn object if they intend
1585  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1586  * any time if they don't.
1587  */
1588 
/* Register an l2cap_user on @conn, calling user->probe() under the
 * hci_dev lock.  Returns 0 on success, -EINVAL if the user is already
 * registered, -ENODEV if the connection is being torn down, or the
 * error returned by ->probe().
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* Non-NULL list linkage means the user is already registered */
	if (user->list.next || user->list.prev) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1626 
1627 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1628 {
1629 	struct hci_dev *hdev = conn->hcon->hdev;
1630 
1631 	hci_dev_lock(hdev);
1632 
1633 	if (!user->list.next || !user->list.prev)
1634 		goto out_unlock;
1635 
1636 	list_del(&user->list);
1637 	user->list.next = NULL;
1638 	user->list.prev = NULL;
1639 	user->remove(conn, user);
1640 
1641 out_unlock:
1642 	hci_dev_unlock(hdev);
1643 }
1644 EXPORT_SYMBOL(l2cap_unregister_user);
1645 
/* Unregister every l2cap_user still attached to @conn, clearing each
 * user's list linkage (so re-registration stays possible) and invoking
 * its ->remove() callback.  Used during connection teardown.
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del(&user->list);
		user->list.next = NULL;
		user->list.prev = NULL;
		user->remove(conn, user);
	}
}
1658 
/* Tear down the L2CAP connection attached to @hcon.
 *
 * All registered users are removed, every channel is deleted and
 * closed with @err, the HCI channel is released and any pending
 * info/security timers are cancelled.  Each channel is held across
 * l2cap_chan_del() so it stays valid for the ->close() callback.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame */
	kfree_skb(conn->rx_skb);

	l2cap_unregister_all_users(conn);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		/* ->close() runs without the channel lock held */
		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1704 
/* SMP security procedure timed out.  Only acts if SMP was still
 * pending on the link: destroys the SMP context and tears the whole
 * L2CAP connection down with ETIMEDOUT.
 */
static void security_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       security_timer.work);

	BT_DBG("conn %p", conn);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
		smp_chan_destroy(conn);
		l2cap_conn_del(conn->hcon, ETIMEDOUT);
	}
}
1717 
/* Look up or create the l2cap_conn object for @hcon.
 *
 * Allocates the connection together with its HCI channel, picks the
 * MTU from the controller (LE MTU when set, ACL MTU otherwise) and
 * initializes locks, lists and the per-link-type delayed work
 * (security timer for LE, info timer for BR/EDR).  Returns NULL on
 * allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	/* Reuse an existing connection object */
	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	/* Keep the hci_conn alive as long as this l2cap_conn exists;
	 * the reference is dropped in l2cap_conn_free().
	 */
	hci_conn_get(conn->hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	if (hcon->type == ACL_LINK)
		conn->hs_enabled = test_bit(HCI_HS_ENABLED,
					    &hcon->hdev->dev_flags);

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
1777 
/* kref release callback for l2cap_conn: drops the hci_conn reference
 * taken in l2cap_conn_add() and frees the object.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1785 
/* Take a reference on @conn. */
void l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
}
EXPORT_SYMBOL(l2cap_conn_get);
1791 
/* Drop a reference on @conn; the last put frees it via
 * l2cap_conn_free().
 */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1797 
1798 /* ---- Socket interface ---- */
1799 
1800 /* Find socket with psm and source / destination bdaddr.
1801  * Returns closest match.
1802  */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		/* Only consider channels bound to the right transport:
		 * BR/EDR source addresses for ACL links, LE address
		 * types for LE links.
		 */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
1847 
1848 static bool is_valid_psm(u16 psm, u8 dst_type)
1849 {
1850 	if (!psm)
1851 		return false;
1852 
1853 	if (bdaddr_type_is_le(dst_type))
1854 		return (psm <= 0x00ff);
1855 
1856 	/* PSM must be odd and lsb of upper byte must be 0 */
1857 	return ((psm & 0x0101) == 0x0001);
1858 }
1859 
/* Initiate an outgoing L2CAP connection on @chan to @dst/@dst_type,
 * identified by @psm and/or fixed @cid.
 *
 * Validates the PSM/CID combination and channel mode, creates (or
 * reuses) the underlying ACL or LE link, attaches the channel to the
 * connection and starts channel establishment if the link is already
 * up.  Returns 0 on success (including when a connect is already in
 * progress) or a negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, &chan->src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* Raw channels are exempt from PSM validation */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need either a PSM or a CID */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		l2cap_le_flowctl_init(chan);
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	if (bdaddr_type_is_le(dst_type))
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* A fixed CID must not already be in use on this connection */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);

	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
1995 
1996 static void l2cap_monitor_timeout(struct work_struct *work)
1997 {
1998 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1999 					       monitor_timer.work);
2000 
2001 	BT_DBG("chan %p", chan);
2002 
2003 	l2cap_chan_lock(chan);
2004 
2005 	if (!chan->conn) {
2006 		l2cap_chan_unlock(chan);
2007 		l2cap_chan_put(chan);
2008 		return;
2009 	}
2010 
2011 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
2012 
2013 	l2cap_chan_unlock(chan);
2014 	l2cap_chan_put(chan);
2015 }
2016 
2017 static void l2cap_retrans_timeout(struct work_struct *work)
2018 {
2019 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2020 					       retrans_timer.work);
2021 
2022 	BT_DBG("chan %p", chan);
2023 
2024 	l2cap_chan_lock(chan);
2025 
2026 	if (!chan->conn) {
2027 		l2cap_chan_unlock(chan);
2028 		l2cap_chan_put(chan);
2029 		return;
2030 	}
2031 
2032 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2033 	l2cap_chan_unlock(chan);
2034 	l2cap_chan_put(chan);
2035 }
2036 
/* Transmit the frames in @skbs in streaming mode.
 *
 * The frames are appended to tx_q and then drained immediately: each
 * gets the next TX sequence number, a packed control field and, when
 * enabled, a CRC16 FCS.  reqseq is always 0 and frames are not kept
 * for retransmission.  Sending is deferred while the channel is
 * moving between controllers.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
2075 
/* Transmit queued I-frames in ERTM mode.
 *
 * Sends from tx_send_head while the remote TX window has room and the
 * transmit state machine allows it.  Each frame carries the current
 * reqseq (acknowledging received frames), the next txseq and, when
 * enabled, a CRC16 FCS; a pending F-bit is attached to the next
 * frame.  Frames are cloned before sending so the original stays on
 * tx_q for retransmission.  Returns the number of frames sent, 0 when
 * blocked (remote busy or channel moving), or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head; NULL means the queue tail was sent */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2145 
/* Retransmit every frame queued on retrans_list.
 *
 * Each sequence number is looked up in tx_q; the control field is
 * refreshed (current reqseq, pending F-bit) and rewritten into a
 * fresh clone — or a writable copy if the skb is shared — before
 * resending, with the FCS recomputed when in use.  Exceeding max_tx
 * retransmissions disconnects the channel.  Skipped entirely while
 * the remote is busy or the channel is moving.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2224 
/* Queue the single I-frame with sequence number control->reqseq for
 * retransmission and run the resend machinery.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2233 
/* Retransmit all unacked I-frames starting from control->reqseq.
 *
 * The retransmission list is rebuilt from tx_q: frames before reqseq
 * are skipped as acknowledged, and queueing stops at tx_send_head
 * since frames from there on were never sent.  A P-bit in @control
 * arms the F-bit for the next transmitted frame.  Nothing is resent
 * while the remote is busy.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Skip frames the peer has already acknowledged */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2267 
/* Acknowledge received I-frames, either immediately or lazily.
 *
 * When locally busy in the RECV state an RNR is sent right away.
 * Otherwise pending I-frames are transmitted first (they piggyback
 * the ack); if frames still need acknowledging and their count has
 * reached 3/4 of the ack window an explicit RR is sent, else the ack
 * timer is armed to send one later.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2317 
/* Copy len bytes of user data from msg into skb.  The first count
 * bytes go into skb's linear area; the remainder is split into
 * continuation fragments (no L2CAP header, at most conn->mtu bytes
 * each) chained on skb's frag_list.
 *
 * Returns the number of bytes copied or a negative errno.  On error
 * the caller frees skb, which also releases any fragments already
 * linked into frag_list.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len  -= count;

		/* Keep the parent skb's length accounting in sync with
		 * the data added to the fragment chain.
		 */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2362 
/* Build a connectionless (G-frame) PDU: basic L2CAP header plus the
 * 2-byte PSM, followed by the user data from msg.  Returns the new
 * skb or an ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
						 u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
	       __le16_to_cpu(chan->psm), len, priority);

	/* Only this much fits in the first skb; the rest goes into
	 * continuation fragments inside l2cap_skbuff_fromiovec().
	 */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2397 
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * user data from msg.  Returns the new skb or an ERR_PTR on
 * allocation/copy failure.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len,
					      u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	/* Data beyond this goes into continuation fragments. */
	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2430 
/* Build an ERTM/streaming I-frame PDU: basic L2CAP header, a zeroed
 * (enhanced or extended) control field that is filled in at transmit
 * time, and an optional SDU length field for the first segment of a
 * segmented SDU.  Room for the FCS is reserved in the length when
 * CRC16 is in use.  Returns the new skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	/* Only the first segment of a segmented SDU carries sdulen. */
	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2484 
/* Segment an outgoing SDU into I-frame PDUs sized to fit both the HCI
 * MTU and the remote's MPS, queueing them on seg_queue with the proper
 * SAR (segmentation and reassembly) marking.  Returns 0 on success or
 * a negative errno; seg_queue is purged on error.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Fits in one PDU: no SDU length field needed. */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First segment carries the total SDU length, which
		 * costs L2CAP_SDULEN_SIZE bytes of payload room.
		 */
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Later segments omit the SDU length field. */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2554 
/* Build an LE flow-control-mode PDU: basic L2CAP header plus an
 * optional 2-byte SDU length field (first segment only), followed by
 * user data from msg.  Returns the new skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2597 
/* Segment an outgoing SDU for LE flow control mode.  The first PDU
 * carries a 2-byte SDU length field; all PDUs are limited by both the
 * connection MTU and the remote's MPS.  Returns 0 on success or a
 * negative errno; seg_queue is purged on error.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;

	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	/* First PDU carries the total SDU length, costing
	 * L2CAP_SDULEN_SIZE bytes of payload room.
	 */
	sdu_len = len;
	pdu_len -= L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		if (sdu_len) {
			/* Later segments omit the SDU length field. */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2637 
/* Send an SDU on a channel, dispatching on channel type and mode:
 * connectionless channels send a single G-frame; LE flow control
 * segments and transmits while tx_credits last; basic mode sends one
 * B-frame; ERTM/streaming segment first and then hand the frames to
 * the tx state machine or the streaming sender.
 *
 * Returns the number of bytes accepted (len) or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		if (!chan->tx_credits)
			return -EAGAIN;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		/* Send as many queued PDUs as we have credits for. */
		while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
			l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
			chan->tx_credits--;
		}

		/* Out of credits: stop the caller until more arrive. */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
2754 
2755 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2756 {
2757 	struct l2cap_ctrl control;
2758 	u16 seq;
2759 
2760 	BT_DBG("chan %p, txseq %u", chan, txseq);
2761 
2762 	memset(&control, 0, sizeof(control));
2763 	control.sframe = 1;
2764 	control.super = L2CAP_SUPER_SREJ;
2765 
2766 	for (seq = chan->expected_tx_seq; seq != txseq;
2767 	     seq = __next_seq(chan, seq)) {
2768 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2769 			control.reqseq = seq;
2770 			l2cap_send_sframe(chan, &control);
2771 			l2cap_seq_list_append(&chan->srej_list, seq);
2772 		}
2773 	}
2774 
2775 	chan->expected_tx_seq = __next_seq(chan, txseq);
2776 }
2777 
2778 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2779 {
2780 	struct l2cap_ctrl control;
2781 
2782 	BT_DBG("chan %p", chan);
2783 
2784 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2785 		return;
2786 
2787 	memset(&control, 0, sizeof(control));
2788 	control.sframe = 1;
2789 	control.super = L2CAP_SUPER_SREJ;
2790 	control.reqseq = chan->srej_list.tail;
2791 	l2cap_send_sframe(chan, &control);
2792 }
2793 
/* Re-send SREJ frames for every sequence number still pending in
 * srej_list, stopping at txseq.  Each popped entry is re-appended so
 * the list contents are preserved.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		/* Stop at txseq or when the list runs empty. */
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		/* Put the entry back; it is still outstanding. */
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2819 
/* Process an acknowledgment (reqseq) from the remote: free every
 * acked frame from the tx queue and stop the retransmission timer
 * once no unacked frames remain.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or this ack adds no new information. */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2851 
2852 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2853 {
2854 	BT_DBG("chan %p", chan);
2855 
2856 	chan->expected_tx_seq = chan->buffer_seq;
2857 	l2cap_seq_list_clear(&chan->srej_list);
2858 	skb_queue_purge(&chan->srej_q);
2859 	chan->rx_state = L2CAP_RX_STATE_RECV;
2860 }
2861 
/* ERTM transmit state machine, XMIT state: data flows normally.
 * Explicit polls and retransmission timeouts move the channel to
 * WAIT_F until the remote answers with the F-bit set.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* New outgoing data: queue it and send immediately. */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* An RNR went out while busy; poll the remote
			 * with an RR so it knows we can receive again.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the remote to learn its
		 * receive state before resending anything.
		 */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2933 
2934 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2935 				  struct l2cap_ctrl *control,
2936 				  struct sk_buff_head *skbs, u8 event)
2937 {
2938 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2939 	       event);
2940 
2941 	switch (event) {
2942 	case L2CAP_EV_DATA_REQUEST:
2943 		if (chan->tx_send_head == NULL)
2944 			chan->tx_send_head = skb_peek(skbs);
2945 		/* Queue data, but don't send. */
2946 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2947 		break;
2948 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2949 		BT_DBG("Enter LOCAL_BUSY");
2950 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2951 
2952 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2953 			/* The SREJ_SENT state must be aborted if we are to
2954 			 * enter the LOCAL_BUSY state.
2955 			 */
2956 			l2cap_abort_rx_srej_sent(chan);
2957 		}
2958 
2959 		l2cap_send_ack(chan);
2960 
2961 		break;
2962 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2963 		BT_DBG("Exit LOCAL_BUSY");
2964 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2965 
2966 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2967 			struct l2cap_ctrl local_control;
2968 			memset(&local_control, 0, sizeof(local_control));
2969 			local_control.sframe = 1;
2970 			local_control.super = L2CAP_SUPER_RR;
2971 			local_control.poll = 1;
2972 			local_control.reqseq = chan->buffer_seq;
2973 			l2cap_send_sframe(chan, &local_control);
2974 
2975 			chan->retry_count = 1;
2976 			__set_monitor_timer(chan);
2977 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2978 		}
2979 		break;
2980 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2981 		l2cap_process_reqseq(chan, control->reqseq);
2982 
2983 		/* Fall through */
2984 
2985 	case L2CAP_EV_RECV_FBIT:
2986 		if (control && control->final) {
2987 			__clear_monitor_timer(chan);
2988 			if (chan->unacked_frames > 0)
2989 				__set_retrans_timer(chan);
2990 			chan->retry_count = 0;
2991 			chan->tx_state = L2CAP_TX_STATE_XMIT;
2992 			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2993 		}
2994 		break;
2995 	case L2CAP_EV_EXPLICIT_POLL:
2996 		/* Ignore */
2997 		break;
2998 	case L2CAP_EV_MONITOR_TO:
2999 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
3000 			l2cap_send_rr_or_rnr(chan, 1);
3001 			__set_monitor_timer(chan);
3002 			chan->retry_count++;
3003 		} else {
3004 			l2cap_send_disconn_req(chan, ECONNABORTED);
3005 		}
3006 		break;
3007 	default:
3008 		break;
3009 	}
3010 }
3011 
3012 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3013 		     struct sk_buff_head *skbs, u8 event)
3014 {
3015 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3016 	       chan, control, skbs, event, chan->tx_state);
3017 
3018 	switch (chan->tx_state) {
3019 	case L2CAP_TX_STATE_XMIT:
3020 		l2cap_tx_state_xmit(chan, control, skbs, event);
3021 		break;
3022 	case L2CAP_TX_STATE_WAIT_F:
3023 		l2cap_tx_state_wait_f(chan, control, skbs, event);
3024 		break;
3025 	default:
3026 		/* Ignore event */
3027 		break;
3028 	}
3029 }
3030 
/* Feed the reqseq (and F-bit) of a received frame into the tx state
 * machine so acknowledged frames are released.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
3037 
/* Feed only the F-bit of a received frame into the tx state machine
 * (no reqseq processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
3044 
3045 /* Copy frame to all raw sockets on that connection */
3046 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
3047 {
3048 	struct sk_buff *nskb;
3049 	struct l2cap_chan *chan;
3050 
3051 	BT_DBG("conn %p", conn);
3052 
3053 	mutex_lock(&conn->chan_lock);
3054 
3055 	list_for_each_entry(chan, &conn->chan_l, list) {
3056 		if (chan->chan_type != L2CAP_CHAN_RAW)
3057 			continue;
3058 
3059 		/* Don't send frame to the channel it came from */
3060 		if (bt_cb(skb)->chan == chan)
3061 			continue;
3062 
3063 		nskb = skb_clone(skb, GFP_KERNEL);
3064 		if (!nskb)
3065 			continue;
3066 		if (chan->ops->recv(chan, nskb))
3067 			kfree_skb(nskb);
3068 	}
3069 
3070 	mutex_unlock(&conn->chan_lock);
3071 }
3072 
3073 /* ---- L2CAP signalling commands ---- */
/* Build a signalling PDU: L2CAP header + command header + dlen bytes
 * of payload.  Payload beyond the connection MTU is carried in
 * continuation fragments chained on frag_list.  Returns NULL if the
 * MTU cannot even hold the headers or an allocation fails.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling uses a different fixed CID on LE links. */
	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the remainder of the first skb with payload. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the fragment chain along with the head skb. */
	kfree_skb(skb);
	return NULL;
}
3139 
/* Parse one configuration option at *ptr, advancing *ptr past it.
 * Values of 1/2/4 bytes are returned by value in *val (little-endian
 * decoded); any other length returns a pointer to the raw option data
 * instead.  Returns the total size of the parsed option.
 *
 * NOTE(review): opt->len comes from the peer and is not validated
 * against the end of the received buffer here — callers appear to rely
 * on the overall command length for bounds; verify that no option can
 * advance *ptr past the buffer.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a raw pointer. */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3173 
/* Append one configuration option (type/len/value) at *ptr, advancing
 * *ptr past it.  Values of 1/2/4 bytes are stored little-endian; any
 * other length treats val as a pointer to raw data to copy.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length option: val is a data pointer. */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3203 
/* Append an Extended Flow Specification option describing this
 * channel.  ERTM uses the channel's local parameters with the default
 * access latency and flush timeout; streaming mode advertises a
 * best-effort service with no latency/flush constraints.  Any other
 * mode adds nothing.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id		= chan->local_id;
		efs.stype	= chan->local_stype;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to	= __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id		= 1;
		efs.stype	= L2CAP_SERV_BESTEFFORT;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= 0;
		efs.flush_to	= 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs);
}
3234 
/* Delayed work: the ack timer fired.  If received frames are still
 * unacknowledged, send an RR/RNR (without the poll bit) to ack them.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	/* Drops the channel reference presumably taken when the timer
	 * was scheduled — verify against __set_ack_timer.
	 */
	l2cap_chan_put(chan);
}
3254 
/* Reset sequence-number state and queues for a channel entering ERTM
 * or streaming mode.  For ERTM proper, also initialize the rx/tx
 * state machines, the retransmit/monitor/ack timers, and the SREJ and
 * retransmit sequence lists.  Returns 0 or a negative errno from
 * sequence-list allocation.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* AMP channel-move state starts out stable on BR/EDR. */
	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Streaming mode needs none of the ERTM machinery below. */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Unwind the first list on partial failure. */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3299 
3300 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3301 {
3302 	switch (mode) {
3303 	case L2CAP_MODE_STREAMING:
3304 	case L2CAP_MODE_ERTM:
3305 		if (l2cap_mode_supported(mode, remote_feat_mask))
3306 			return mode;
3307 		/* fall through */
3308 	default:
3309 		return L2CAP_MODE_BASIC;
3310 	}
3311 }
3312 
3313 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3314 {
3315 	return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3316 }
3317 
3318 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3319 {
3320 	return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3321 }
3322 
/* Choose ERTM retransmit/monitor timeouts: spec defaults for BR/EDR
 * channels, or values derived from the AMP controller's best-effort
 * flush timeout for channels on an AMP link.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit range of the RFC option fields. */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3360 
3361 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3362 {
3363 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3364 	    __l2cap_ews_supported(chan->conn)) {
3365 		/* use extended control field */
3366 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3367 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3368 	} else {
3369 		chan->tx_win = min_t(u16, chan->tx_win,
3370 				     L2CAP_DEFAULT_TX_WINDOW);
3371 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3372 	}
3373 	chan->ack_win = chan->tx_win;
3374 }
3375 
/* Build an outgoing Configure Request for @chan into @data.
 *
 * On the first request of a negotiation the channel mode is selected
 * from the remote feature mask; subsequent requests keep the already
 * negotiated mode.  Depending on the mode, MTU, RFC, FCS and optionally
 * EFS/EWS options are appended.
 *
 * Returns the total request length (header plus options) in bytes.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only once per negotiation */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* CONF_STATE2_DEVICE: the mode is mandated by the device
		 * and must not be downgraded by l2cap_select_mode().
		 */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* MTU option is only sent when it differs from the default */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Only send an explicit basic-mode RFC option when the
		 * peer understands ERTM or streaming at all.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* PDU size is bounded by what fits in one ACL packet
		 * after the extended header, SDU length and FCS fields.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		/* The RFC option can only carry the classic window; a
		 * larger window is advertised via the EWS option below.
		 */
		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		/* Request no FCS if we don't want it or the peer already
		 * asked for none.
		 */
		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);

	return ptr - data;
}
3493 
/* Parse the accumulated Configure Request options in chan->conf_req and
 * build the Configure Response into @data.
 *
 * Option payloads are only honoured when their length matches the
 * expected size; malformed options are skipped.  In particular this
 * prevents a short EFS option from leaving the on-stack efs struct
 * (later echoed back to the peer) partially uninitialized.
 *
 * Returns the response length in bytes, or -ECONNREFUSED when the
 * request is unacceptable and the connection must be dropped.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			/* QoS is accepted but not acted upon */
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* Extended window size requires a high-speed
			 * (AMP) capable connection.
			 */
			if (!chan->conn->hs_enabled)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			/* Unknown hint options may be silently ignored;
			 * anything else unknown is reported back.
			 */
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* Mandated mode (CONF_STATE2_DEVICE) must match the peer */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Only one mode renegotiation round is allowed */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the remote MPS to what fits in our ACL MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = __constant_cpu_to_le16(0);

	return ptr - data;
}
3707 
3708 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3709 				void *data, u16 *result)
3710 {
3711 	struct l2cap_conf_req *req = data;
3712 	void *ptr = req->data;
3713 	int type, olen;
3714 	unsigned long val;
3715 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3716 	struct l2cap_conf_efs efs;
3717 
3718 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3719 
3720 	while (len >= L2CAP_CONF_OPT_SIZE) {
3721 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3722 
3723 		switch (type) {
3724 		case L2CAP_CONF_MTU:
3725 			if (val < L2CAP_DEFAULT_MIN_MTU) {
3726 				*result = L2CAP_CONF_UNACCEPT;
3727 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3728 			} else
3729 				chan->imtu = val;
3730 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3731 			break;
3732 
3733 		case L2CAP_CONF_FLUSH_TO:
3734 			chan->flush_to = val;
3735 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3736 					   2, chan->flush_to);
3737 			break;
3738 
3739 		case L2CAP_CONF_RFC:
3740 			if (olen == sizeof(rfc))
3741 				memcpy(&rfc, (void *)val, olen);
3742 
3743 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3744 			    rfc.mode != chan->mode)
3745 				return -ECONNREFUSED;
3746 
3747 			chan->fcs = 0;
3748 
3749 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3750 					   sizeof(rfc), (unsigned long) &rfc);
3751 			break;
3752 
3753 		case L2CAP_CONF_EWS:
3754 			chan->ack_win = min_t(u16, val, chan->ack_win);
3755 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3756 					   chan->tx_win);
3757 			break;
3758 
3759 		case L2CAP_CONF_EFS:
3760 			if (olen == sizeof(efs))
3761 				memcpy(&efs, (void *)val, olen);
3762 
3763 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3764 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3765 			    efs.stype != chan->local_stype)
3766 				return -ECONNREFUSED;
3767 
3768 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3769 					   (unsigned long) &efs);
3770 			break;
3771 
3772 		case L2CAP_CONF_FCS:
3773 			if (*result == L2CAP_CONF_PENDING)
3774 				if (val == L2CAP_FCS_NONE)
3775 					set_bit(CONF_RECV_NO_FCS,
3776 						&chan->conf_state);
3777 			break;
3778 		}
3779 	}
3780 
3781 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3782 		return -ECONNREFUSED;
3783 
3784 	chan->mode = rfc.mode;
3785 
3786 	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3787 		switch (rfc.mode) {
3788 		case L2CAP_MODE_ERTM:
3789 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3790 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3791 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3792 			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3793 				chan->ack_win = min_t(u16, chan->ack_win,
3794 						      rfc.txwin_size);
3795 
3796 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3797 				chan->local_msdu = le16_to_cpu(efs.msdu);
3798 				chan->local_sdu_itime =
3799 					le32_to_cpu(efs.sdu_itime);
3800 				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3801 				chan->local_flush_to =
3802 					le32_to_cpu(efs.flush_to);
3803 			}
3804 			break;
3805 
3806 		case L2CAP_MODE_STREAMING:
3807 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3808 		}
3809 	}
3810 
3811 	req->dcid   = cpu_to_le16(chan->dcid);
3812 	req->flags  = __constant_cpu_to_le16(0);
3813 
3814 	return ptr - data;
3815 }
3816 
3817 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3818 				u16 result, u16 flags)
3819 {
3820 	struct l2cap_conf_rsp *rsp = data;
3821 	void *ptr = rsp->data;
3822 
3823 	BT_DBG("chan %p", chan);
3824 
3825 	rsp->scid   = cpu_to_le16(chan->dcid);
3826 	rsp->result = cpu_to_le16(result);
3827 	rsp->flags  = cpu_to_le16(flags);
3828 
3829 	return ptr - data;
3830 }
3831 
/* Send the deferred LE connection response for @chan with a success
 * result, advertising our MTU, MPS and initial receive credits.
 * Called once the deferred accept decision has been made.
 */
void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p", chan);

	rsp.dcid    = cpu_to_le16(chan->scid);
	rsp.mtu     = cpu_to_le16(chan->imtu);
	rsp.mps     = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result  = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);

	/* chan->ident holds the ident of the pending connect request */
	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}
3848 
/* Send the deferred BR/EDR (or AMP) connection response for @chan with
 * a success result and, if not already done, kick off configuration by
 * sending the first Configure Request.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];
	u8 rsp_code;

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	/* AMP channels answer with Create Channel Response instead */
	if (chan->hs_hcon)
		rsp_code = L2CAP_CREATE_CHAN_RSP;
	else
		rsp_code = L2CAP_CONN_RSP;

	BT_DBG("chan %p rsp_code %u", chan, rsp_code);

	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);

	/* Only one initial Configure Request per channel */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
3877 
3878 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3879 {
3880 	int type, olen;
3881 	unsigned long val;
3882 	/* Use sane default values in case a misbehaving remote device
3883 	 * did not send an RFC or extended window size option.
3884 	 */
3885 	u16 txwin_ext = chan->ack_win;
3886 	struct l2cap_conf_rfc rfc = {
3887 		.mode = chan->mode,
3888 		.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3889 		.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3890 		.max_pdu_size = cpu_to_le16(chan->imtu),
3891 		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3892 	};
3893 
3894 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3895 
3896 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3897 		return;
3898 
3899 	while (len >= L2CAP_CONF_OPT_SIZE) {
3900 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3901 
3902 		switch (type) {
3903 		case L2CAP_CONF_RFC:
3904 			if (olen == sizeof(rfc))
3905 				memcpy(&rfc, (void *)val, olen);
3906 			break;
3907 		case L2CAP_CONF_EWS:
3908 			txwin_ext = val;
3909 			break;
3910 		}
3911 	}
3912 
3913 	switch (rfc.mode) {
3914 	case L2CAP_MODE_ERTM:
3915 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3916 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3917 		chan->mps = le16_to_cpu(rfc.max_pdu_size);
3918 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3919 			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3920 		else
3921 			chan->ack_win = min_t(u16, chan->ack_win,
3922 					      rfc.txwin_size);
3923 		break;
3924 	case L2CAP_MODE_STREAMING:
3925 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3926 	}
3927 }
3928 
3929 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3930 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3931 				    u8 *data)
3932 {
3933 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3934 
3935 	if (cmd_len < sizeof(*rej))
3936 		return -EPROTO;
3937 
3938 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3939 		return 0;
3940 
3941 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3942 	    cmd->ident == conn->info_ident) {
3943 		cancel_delayed_work(&conn->info_timer);
3944 
3945 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3946 		conn->info_ident = 0;
3947 
3948 		l2cap_conn_start(conn);
3949 	}
3950 
3951 	return 0;
3952 }
3953 
/* Handle an incoming Connection Request (or AMP Create Channel Request,
 * depending on @rsp_code) and send the matching response.
 *
 * Looks up a listening channel for the requested PSM, performs security
 * checks, creates and registers the new channel, and answers with
 * success or a pending/reject result.  @amp_id identifies the local
 * controller (AMP_ID_BREDR for BR/EDR).
 *
 * Returns the newly created channel, or NULL when none was created.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* Lock order: conn->chan_lock before the channel lock */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	/* Our source CID is reported back as the peer's destination CID */
	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* Userspace decides later whether to accept */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange still outstanding */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Start the feature mask exchange if it has not been done yet */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* On immediate success, start configuration right away */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
4087 
/* Signaling handler for a BR/EDR Connection Request.
 *
 * Notifies the management interface of the connection (once per ACL)
 * and delegates channel creation to l2cap_connect().
 * Returns 0, or -EPROTO on a truncated command.
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	struct hci_conn *hcon = conn->hcon;

	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	hci_dev_lock(hdev);
	/* Report the device as connected to mgmt only once per ACL link */
	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
		mgmt_device_connected(hdev, &hcon->dst, hcon->type,
				      hcon->dst_type, 0, NULL, 0,
				      hcon->dev_class);
	hci_dev_unlock(hdev);

	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
	return 0;
}
4108 
/* Handle a Connection Response (or Create Channel Response).
 *
 * Finds the matching channel by source CID (or by command ident while
 * the CID is still unassigned) and advances it: on success move to
 * BT_CONFIG and send the first Configure Request, on pending just mark
 * the state, on any failure tear the channel down.
 *
 * Returns 0 on success, -EPROTO on a truncated command, or -EBADSLT
 * when no matching channel exists.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		/* A pending/reject response may not echo our CID yet;
		 * fall back to matching by command ident.
		 */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only the first success triggers a Configure Request */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result is a rejection */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
4181 
4182 static inline void set_default_fcs(struct l2cap_chan *chan)
4183 {
4184 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4185 	 * sides request it.
4186 	 */
4187 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4188 		chan->fcs = L2CAP_FCS_NONE;
4189 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4190 		chan->fcs = L2CAP_FCS_CRC16;
4191 }
4192 
/* Send a successful Configure Response for an EFS negotiation, clearing
 * the local pending-configuration flag and marking the output side of
 * the configuration as complete.
 */
static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
				    u8 ident, u16 flags)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
	       flags);

	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
		       l2cap_build_conf_rsp(chan, data,
					    L2CAP_CONF_SUCCESS, flags), data);
}
4208 
4209 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4210 				   u16 scid, u16 dcid)
4211 {
4212 	struct l2cap_cmd_rej_cid rej;
4213 
4214 	rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
4215 	rej.scid = __cpu_to_le16(scid);
4216 	rej.dcid = __cpu_to_le16(dcid);
4217 
4218 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4219 }
4220 
/* Handle an incoming Configure Request.
 *
 * Accumulates (possibly fragmented) option data in chan->conf_req,
 * and once the final fragment arrives parses the request, sends the
 * response, and — when both directions are configured — finishes
 * channel setup (ERTM init and ready notification).
 *
 * Returns 0 or a negative errno from channel setup.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns with the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only valid while connecting/configuring */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Our own Configure Request has not gone out yet: send it now */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and asume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4329 
/* Handle an incoming Configure Response.
 *
 * On success the negotiated RFC/EWS parameters are applied; a pending
 * result triggers the EFS flow; an unacceptable result is renegotiated
 * (up to L2CAP_CONF_MAX_CONF_RSP attempts) before the channel is torn
 * down.  When both directions finish, channel setup is completed.
 *
 * Returns 0 or a negative errno from channel setup.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns with the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* AMP channels wait for the logical link first */
			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* Too many renegotiation attempts: fall through and
		 * give up on the channel.
		 */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4441 
/* Handle an incoming Disconnection Request: acknowledge it with a
 * Disconnection Response, then tear the channel down.
 *
 * The channel is held across the unlock so the final close callback
 * can run safely after l2cap_chan_del() dropped it from the list.
 * Returns 0, or -EPROTO on a malformed command.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's dcid is our scid */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	l2cap_chan_lock(chan);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	/* Keep a reference so close() can run after the del */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4488 
/* Handle an incoming L2CAP Disconnect Response: the peer has confirmed
 * our earlier Disconnect Request, so delete the channel with no error.
 *
 * Returns 0 (an unknown SCID is silently ignored) or -EPROTO if the
 * response is malformed.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Hold a reference across l2cap_chan_del() so ops->close() can
	 * still be called safely afterwards.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4527 
4528 static inline int l2cap_information_req(struct l2cap_conn *conn,
4529 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4530 					u8 *data)
4531 {
4532 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4533 	u16 type;
4534 
4535 	if (cmd_len != sizeof(*req))
4536 		return -EPROTO;
4537 
4538 	type = __le16_to_cpu(req->type);
4539 
4540 	BT_DBG("type 0x%4.4x", type);
4541 
4542 	if (type == L2CAP_IT_FEAT_MASK) {
4543 		u8 buf[8];
4544 		u32 feat_mask = l2cap_feat_mask;
4545 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4546 		rsp->type   = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4547 		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4548 		if (!disable_ertm)
4549 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4550 				| L2CAP_FEAT_FCS;
4551 		if (conn->hs_enabled)
4552 			feat_mask |= L2CAP_FEAT_EXT_FLOW
4553 				| L2CAP_FEAT_EXT_WINDOW;
4554 
4555 		put_unaligned_le32(feat_mask, rsp->data);
4556 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4557 			       buf);
4558 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4559 		u8 buf[12];
4560 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4561 
4562 		if (conn->hs_enabled)
4563 			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4564 		else
4565 			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4566 
4567 		rsp->type   = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4568 		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4569 		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4570 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4571 			       buf);
4572 	} else {
4573 		struct l2cap_info_rsp rsp;
4574 		rsp.type   = cpu_to_le16(type);
4575 		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4576 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4577 			       &rsp);
4578 	}
4579 
4580 	return 0;
4581 }
4582 
/* Handle an incoming L2CAP Information Response.  After a successful
 * feature-mask exchange, optionally chain a fixed-channel query before
 * marking the information phase done and starting pending channels.
 *
 * Returns 0 (stale/spoofed responses are ignored) or -EPROTO if the
 * response is too short.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer rejected the query: give up on extended info and
		 * proceed with connection setup anyway.
		 */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Follow up with a fixed-channel query; the info
			 * phase stays open until its response arrives.
			 */
			struct l2cap_info_req req;
			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4645 
/* Handle an incoming L2CAP Create Channel Request (AMP extension).
 * For AMP controller id 0 this degenerates to a regular BR/EDR
 * connect; otherwise the AMP controller id is validated and the new
 * channel is associated with the high-speed link.
 *
 * Returns 0 (bad AMP ids are answered with L2CAP_CR_BAD_AMP) or
 * -EPROTO/-EINVAL for malformed or disallowed requests.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!conn->hs_enabled)
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			/* No high-speed link to the peer on this AMP
			 * controller: reject the CID pair.
			 */
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		/* No FCS on AMP links; the controller guarantees integrity */
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4722 
4723 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4724 {
4725 	struct l2cap_move_chan_req req;
4726 	u8 ident;
4727 
4728 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4729 
4730 	ident = l2cap_get_ident(chan->conn);
4731 	chan->ident = ident;
4732 
4733 	req.icid = cpu_to_le16(chan->scid);
4734 	req.dest_amp_id = dest_amp_id;
4735 
4736 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4737 		       &req);
4738 
4739 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4740 }
4741 
4742 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4743 {
4744 	struct l2cap_move_chan_rsp rsp;
4745 
4746 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4747 
4748 	rsp.icid = cpu_to_le16(chan->dcid);
4749 	rsp.result = cpu_to_le16(result);
4750 
4751 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4752 		       sizeof(rsp), &rsp);
4753 }
4754 
4755 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4756 {
4757 	struct l2cap_move_chan_cfm cfm;
4758 
4759 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4760 
4761 	chan->ident = l2cap_get_ident(chan->conn);
4762 
4763 	cfm.icid = cpu_to_le16(chan->scid);
4764 	cfm.result = cpu_to_le16(result);
4765 
4766 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4767 		       sizeof(cfm), &cfm);
4768 
4769 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4770 }
4771 
4772 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4773 {
4774 	struct l2cap_move_chan_cfm cfm;
4775 
4776 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4777 
4778 	cfm.icid = cpu_to_le16(icid);
4779 	cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4780 
4781 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4782 		       sizeof(cfm), &cfm);
4783 }
4784 
4785 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4786 					 u16 icid)
4787 {
4788 	struct l2cap_move_chan_cfm_rsp rsp;
4789 
4790 	BT_DBG("icid 0x%4.4x", icid);
4791 
4792 	rsp.icid = cpu_to_le16(icid);
4793 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4794 }
4795 
/* Detach the channel from its high-speed logical link by clearing the
 * cached hci_chan/hci_conn pointers.  The actual link teardown is not
 * implemented yet (placeholder).
 */
static void __release_logical_link(struct l2cap_chan *chan)
{
	chan->hs_hchan = NULL;
	chan->hs_hcon = NULL;

	/* Placeholder - release the logical link */
}
4803 
/* Handle failure of high-speed logical link setup.  For a channel that
 * never reached BT_CONNECTED this aborts channel creation; for a
 * connected channel it unwinds an in-progress move according to the
 * local move role.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		/* Tell the initiator the move is not supported here */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
4834 
4835 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4836 					struct hci_chan *hchan)
4837 {
4838 	struct l2cap_conf_rsp rsp;
4839 
4840 	chan->hs_hchan = hchan;
4841 	chan->hs_hcon->l2cap_data = chan->conn;
4842 
4843 	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4844 
4845 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4846 		int err;
4847 
4848 		set_default_fcs(chan);
4849 
4850 		err = l2cap_ertm_init(chan);
4851 		if (err < 0)
4852 			l2cap_send_disconn_req(chan, -err);
4853 		else
4854 			l2cap_chan_ready(chan);
4855 	}
4856 }
4857 
/* Advance an in-progress channel move now that the high-speed logical
 * link is connected, according to the current move state and role.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		/* Defer while locally busy; otherwise confirm (as
		 * initiator) or respond with success (as responder).
		 */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4891 
/* Completion callback for high-speed logical link setup.  On failure
 * the link is released and any create/move is unwound; on success the
 * result is routed to channel-creation or channel-move completion
 * depending on whether the channel is already connected.
 *
 * Call with chan locked.
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id != AMP_ID_BREDR)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
4912 
4913 void l2cap_move_start(struct l2cap_chan *chan)
4914 {
4915 	BT_DBG("chan %p", chan);
4916 
4917 	if (chan->local_amp_id == AMP_ID_BREDR) {
4918 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4919 			return;
4920 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4921 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4922 		/* Placeholder - start physical link setup */
4923 	} else {
4924 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4925 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4926 		chan->move_id = 0;
4927 		l2cap_move_setup(chan);
4928 		l2cap_send_move_chan_req(chan, 0);
4929 	}
4930 }
4931 
/* Continue channel creation after physical link setup.  For an
 * outgoing channel (BT_CONNECT) either send a Create Channel Request
 * on the AMP or fall back to a plain BR/EDR connect.  For an incoming
 * channel, answer the pending Create Channel Request and, on success,
 * start configuration.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* No FCS on AMP-capable channels */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		/* chan->ident still holds the Create Channel Request ident */
		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Kick off the configuration exchange */
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}
4983 
4984 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4985 				   u8 remote_amp_id)
4986 {
4987 	l2cap_move_setup(chan);
4988 	chan->move_id = local_amp_id;
4989 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
4990 
4991 	l2cap_send_move_chan_req(chan, remote_amp_id);
4992 }
4993 
4994 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4995 {
4996 	struct hci_chan *hchan = NULL;
4997 
4998 	/* Placeholder - get hci_chan for logical link */
4999 
5000 	if (hchan) {
5001 		if (hchan->state == BT_CONNECTED) {
5002 			/* Logical link is ready to go */
5003 			chan->hs_hcon = hchan->conn;
5004 			chan->hs_hcon->l2cap_data = chan->conn;
5005 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5006 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5007 
5008 			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5009 		} else {
5010 			/* Wait for logical link to be ready */
5011 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5012 		}
5013 	} else {
5014 		/* Logical link not available */
5015 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
5016 	}
5017 }
5018 
5019 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5020 {
5021 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5022 		u8 rsp_result;
5023 		if (result == -EINVAL)
5024 			rsp_result = L2CAP_MR_BAD_ID;
5025 		else
5026 			rsp_result = L2CAP_MR_NOT_ALLOWED;
5027 
5028 		l2cap_send_move_chan_rsp(chan, rsp_result);
5029 	}
5030 
5031 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
5032 	chan->move_state = L2CAP_MOVE_STABLE;
5033 
5034 	/* Restart data transmission */
5035 	l2cap_ertm_send(chan);
5036 }
5037 
/* Completion callback for physical (AMP) link setup.  Routes the
 * result to channel creation or to the appropriate move-role handler.
 *
 * Invoke with locked chan.
 *
 * NOTE(review): the BT_DISCONN/BT_CLOSED path unlocks the chan before
 * returning while every other path leaves it locked for the caller to
 * unlock - presumably the callers account for this asymmetry; verify
 * against the a2mp/amp call sites.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
5071 
/* Handle an incoming L2CAP Move Channel Request.  Validates that the
 * channel may be moved (dynamic CID, ERTM/streaming mode, allowed
 * policy, valid destination controller), resolves move collisions by
 * bd_addr comparison, and answers with the appropriate result.
 *
 * Returns 0 (protocol-level failures are reported in the response) or
 * -EPROTO/-EINVAL for malformed or disallowed requests.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!conn->hs_enabled)
		return -EINVAL;

	/* Returns the chan locked on success; unlocked at function exit */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		rsp.icid = cpu_to_le16(icid);
		rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	if (req->dest_amp_id != AMP_ID_BREDR) {
		/* Destination must be a powered-up AMP controller */
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision.  Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
5169 
/* Continue a channel move after a successful or pending Move Channel
 * Response, advancing the move state machine toward confirmation.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	/* Returns the chan locked; unlocked at function exit */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	/* A pending result re-arms the extended response timer */
	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
5259 
/* Handle a failed Move Channel Response.  On collision the initiator
 * yields and becomes responder; otherwise the move is cancelled.  In
 * all cases an unconfirmed confirmation is sent to close the exchange.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	/* Returns the chan locked; unlocked at function exit */
	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
5288 
5289 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5290 				  struct l2cap_cmd_hdr *cmd,
5291 				  u16 cmd_len, void *data)
5292 {
5293 	struct l2cap_move_chan_rsp *rsp = data;
5294 	u16 icid, result;
5295 
5296 	if (cmd_len != sizeof(*rsp))
5297 		return -EPROTO;
5298 
5299 	icid = le16_to_cpu(rsp->icid);
5300 	result = le16_to_cpu(rsp->result);
5301 
5302 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5303 
5304 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5305 		l2cap_move_continue(conn, icid, result);
5306 	else
5307 		l2cap_move_fail(conn, cmd->ident, icid, result);
5308 
5309 	return 0;
5310 }
5311 
/* Handle an incoming L2CAP Move Channel Confirmation.  Finalizes the
 * move on the responder side and always acknowledges with a
 * Confirmation Response, as required by the spec even for unknown
 * ICIDs.
 *
 * Returns 0, or -EPROTO if the confirmation is malformed.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	/* Returns the chan locked; unlocked at function exit */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			/* Commit to the new controller; drop the logical
			 * link when the channel is back on BR/EDR.
			 */
			chan->local_amp_id = chan->move_id;
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			/* Unconfirmed: revert to the old controller */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
5353 
/* Handle an incoming L2CAP Move Channel Confirmation Response, which
 * completes the move on the initiator side.
 *
 * Returns 0 (unknown ICIDs are silently ignored), or -EPROTO if the
 * response is malformed.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	/* Returns the chan locked; unlocked at function exit */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		/* Commit to the new controller; release the logical
		 * link if the channel ended up back on BR/EDR.
		 */
		chan->local_amp_id = chan->move_id;

		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
5388 
5389 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5390 					 u16 to_multiplier)
5391 {
5392 	u16 max_latency;
5393 
5394 	if (min > max || min < 6 || max > 3200)
5395 		return -EINVAL;
5396 
5397 	if (to_multiplier < 10 || to_multiplier > 3200)
5398 		return -EINVAL;
5399 
5400 	if (max >= to_multiplier * 8)
5401 		return -EINVAL;
5402 
5403 	max_latency = (to_multiplier * 8 / max) - 1;
5404 	if (latency > 499 || latency > max_latency)
5405 		return -EINVAL;
5406 
5407 	return 0;
5408 }
5409 
/* Handle an incoming LE Connection Parameter Update Request (only
 * valid when we are master).  The parameters are validated, a
 * response is always sent, and an accepted set is applied to the
 * underlying LE connection.
 *
 * Returns 0, -EINVAL if we are not master, or -EPROTO if the request
 * is malformed.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min		= __le16_to_cpu(req->min);
	max		= __le16_to_cpu(req->max);
	latency		= __le16_to_cpu(req->latency);
	to_multiplier	= __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	/* Apply the new parameters only after the response is queued */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
5451 
/* Handle an incoming LE Credit Based Connection Response.  On success
 * the channel adopts the peer's CID, MTU, MPS and initial credits and
 * becomes ready; any other result deletes the channel.
 *
 * Returns 0, -EBADSLT if no channel matches the command ident, or
 * -EPROTO if the response is malformed (including an MTU/MPS below
 * the LE minimum of 23 on success).
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid    = __le16_to_cpu(rsp->dcid);
	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	/* LE connect responses are matched by ident, not CID */
	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5510 
/* Dispatch a single BR/EDR L2CAP signaling command to its handler.
 * Only a subset of handlers propagate their return value (generally
 * the request-side handlers, where -EPROTO triggers a Command Reject
 * in the caller); response-side handlers have their errors ignored.
 *
 * Returns the handler's result, 0, or -EINVAL for an unknown opcode.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the request payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5590 
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	/* Handle an incoming LE credit-based connection request.
	 *
	 * Validates the request, finds a listening channel for the PSM,
	 * creates and registers the new channel, and answers with an
	 * L2CAP_LE_CONN_RSP -- either immediately, or later through
	 * ->defer() when FLAG_DEFER_SETUP is set on the channel.
	 *
	 * Returns 0 once the request has been answered (or deferred),
	 * -EPROTO for malformed requests so the caller can send a
	 * command reject.
	 */
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);
	psm  = req->psm;
	/* dcid/credits stay 0 in the response unless a channel is created */
	dcid = 0;
	credits = 0;

	/* 23 is the minimum MTU/MPS permitted for LE channels */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
		result = L2CAP_CR_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* The remote's scid becomes our dcid; refuse duplicates so two
	 * channels never share a destination CID on this connection.
	 */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_NO_MEM;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_NO_MEM;
		goto response_unlock;
	}

	l2cap_le_flowctl_init(chan);

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;
	chan->tx_credits = __le16_to_cpu(req->credits);

	__l2cap_chan_add(conn, chan);
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Remember the request ident so a deferred response can be sent
	 * later with the matching identifier.
	 */
	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);

	/* Deferred setup: the response goes out later via ->defer() */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5702 
5703 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5704 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5705 				   u8 *data)
5706 {
5707 	struct l2cap_le_credits *pkt;
5708 	struct l2cap_chan *chan;
5709 	u16 cid, credits;
5710 
5711 	if (cmd_len != sizeof(*pkt))
5712 		return -EPROTO;
5713 
5714 	pkt = (struct l2cap_le_credits *) data;
5715 	cid	= __le16_to_cpu(pkt->cid);
5716 	credits	= __le16_to_cpu(pkt->credits);
5717 
5718 	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5719 
5720 	chan = l2cap_get_chan_by_dcid(conn, cid);
5721 	if (!chan)
5722 		return -EBADSLT;
5723 
5724 	chan->tx_credits += credits;
5725 
5726 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5727 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5728 		chan->tx_credits--;
5729 	}
5730 
5731 	if (chan->tx_credits)
5732 		chan->ops->resume(chan);
5733 
5734 	l2cap_chan_unlock(chan);
5735 
5736 	return 0;
5737 }
5738 
5739 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5740 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5741 				       u8 *data)
5742 {
5743 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5744 	struct l2cap_chan *chan;
5745 
5746 	if (cmd_len < sizeof(*rej))
5747 		return -EPROTO;
5748 
5749 	mutex_lock(&conn->chan_lock);
5750 
5751 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5752 	if (!chan)
5753 		goto done;
5754 
5755 	l2cap_chan_lock(chan);
5756 	l2cap_chan_del(chan, ECONNREFUSED);
5757 	l2cap_chan_unlock(chan);
5758 
5759 done:
5760 	mutex_unlock(&conn->chan_lock);
5761 	return 0;
5762 }
5763 
5764 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5765 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5766 				   u8 *data)
5767 {
5768 	int err = 0;
5769 
5770 	if (!enable_lecoc) {
5771 		switch (cmd->code) {
5772 		case L2CAP_LE_CONN_REQ:
5773 		case L2CAP_LE_CONN_RSP:
5774 		case L2CAP_LE_CREDITS:
5775 		case L2CAP_DISCONN_REQ:
5776 		case L2CAP_DISCONN_RSP:
5777 			return -EINVAL;
5778 		}
5779 	}
5780 
5781 	switch (cmd->code) {
5782 	case L2CAP_COMMAND_REJ:
5783 		l2cap_le_command_rej(conn, cmd, cmd_len, data);
5784 		break;
5785 
5786 	case L2CAP_CONN_PARAM_UPDATE_REQ:
5787 		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5788 		break;
5789 
5790 	case L2CAP_CONN_PARAM_UPDATE_RSP:
5791 		break;
5792 
5793 	case L2CAP_LE_CONN_RSP:
5794 		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5795 		break;
5796 
5797 	case L2CAP_LE_CONN_REQ:
5798 		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5799 		break;
5800 
5801 	case L2CAP_LE_CREDITS:
5802 		err = l2cap_le_credits(conn, cmd, cmd_len, data);
5803 		break;
5804 
5805 	case L2CAP_DISCONN_REQ:
5806 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5807 		break;
5808 
5809 	case L2CAP_DISCONN_RSP:
5810 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5811 		break;
5812 
5813 	default:
5814 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5815 		err = -EINVAL;
5816 		break;
5817 	}
5818 
5819 	return err;
5820 }
5821 
5822 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5823 					struct sk_buff *skb)
5824 {
5825 	struct hci_conn *hcon = conn->hcon;
5826 	struct l2cap_cmd_hdr *cmd;
5827 	u16 len;
5828 	int err;
5829 
5830 	if (hcon->type != LE_LINK)
5831 		goto drop;
5832 
5833 	if (skb->len < L2CAP_CMD_HDR_SIZE)
5834 		goto drop;
5835 
5836 	cmd = (void *) skb->data;
5837 	skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5838 
5839 	len = le16_to_cpu(cmd->len);
5840 
5841 	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
5842 
5843 	if (len != skb->len || !cmd->ident) {
5844 		BT_DBG("corrupted command");
5845 		goto drop;
5846 	}
5847 
5848 	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5849 	if (err) {
5850 		struct l2cap_cmd_rej_unk rej;
5851 
5852 		BT_ERR("Wrong link type (%d)", err);
5853 
5854 		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5855 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5856 			       sizeof(rej), &rej);
5857 	}
5858 
5859 drop:
5860 	kfree_skb(skb);
5861 }
5862 
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	/* Receive path for the BR/EDR signaling channel.  One frame may
	 * carry several commands back to back; process them in order,
	 * replying with a command reject for any handler that fails.
	 * The skb is always consumed.
	 */
	struct hci_conn *hcon = conn->hcon;
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw sockets a copy before the commands are consumed */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		/* A command claiming more payload than remains in the
		 * frame, or using the reserved ident 0, ends processing
		 * of this frame.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		/* Advance past this command's payload */
		data += cmd_len;
		len  -= cmd_len;
	}

drop:
	kfree_skb(skb);
}
5911 
static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
{
	/* Verify the CRC16 FCS trailer on a received ERTM/streaming
	 * frame.  Returns 0 when the FCS matches (or the channel does
	 * not use an FCS), -EBADMSG on a mismatch.
	 */
	u16 our_fcs, rcv_fcs;
	int hdr_size;

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hdr_size = L2CAP_EXT_HDR_SIZE;
	else
		hdr_size = L2CAP_ENH_HDR_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Trim the FCS off skb->len first; the two FCS bytes
		 * remain in the buffer just past the payload, so
		 * skb->data + skb->len now points at them.
		 */
		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		/* The transmitted CRC also covers the L2CAP header that
		 * was pulled before this point, hence the -hdr_size.
		 */
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);

		if (our_fcs != rcv_fcs)
			return -EBADMSG;
	}
	return 0;
}
5932 
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	/* Answer a poll (P=1) with a final (F=1) frame: an RNR if we
	 * are locally busy, otherwise pending I-frames, or a plain RR
	 * if no I-frame ended up carrying the F bit.
	 */
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote just cleared its busy condition: restart the
	 * retransmission timer if frames are still unacked.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5966 
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* Append new_frag to skb's frag_list, keeping the aggregate
	 * len/data_len/truesize accounting consistent.  *last_frag
	 * tracks the current tail so appends stay O(1).
	 *
	 * NOTE(review): on the very first append *last_frag points at
	 * skb itself, so the (*last_frag)->next store writes skb->next.
	 * The reassembly skb is held in chan->sdu, not on a queue, so
	 * this appears harmless -- confirm against callers.
	 */
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5985 
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	/* Reassemble an SDU from segmented I-frames according to the
	 * SAR bits in the control field.  On success, ownership of skb
	 * passes to this function (delivered to ->recv() or parked in
	 * chan->sdu); on error both skb and any partial SDU are freed.
	 *
	 * err starts at -EINVAL so every SAR violation (e.g. a START
	 * while reassembly is in progress) falls through to cleanup.
	 */
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* Unsegmented frame while mid-reassembly is an error */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* First segment carries the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start segment must not already contain the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb now owned by chan->sdu; prevent cleanup freeing it */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation may not complete (or overflow) the SDU */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The end segment must make the SDU length match exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop both the offending frame and any partial SDU */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
6067 
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder: resegmenting queued data after a channel move is
	 * not implemented yet, so report success unconditionally.
	 */
	return 0;
}
6073 
6074 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6075 {
6076 	u8 event;
6077 
6078 	if (chan->mode != L2CAP_MODE_ERTM)
6079 		return;
6080 
6081 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6082 	l2cap_tx(chan, NULL, NULL, event);
6083 }
6084 
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	/* Stop delivering as soon as we go locally busy; remaining
	 * frames stay in srej_q until busy clears.
	 */
	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap found: the next in-order frame has not arrived yet */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	/* All SREJ'd frames delivered: leave the SREJ_SENT state and
	 * acknowledge what we received.
	 */
	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
6118 
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	/* Handle an incoming SREJ S-frame: retransmit the single frame
	 * the peer asked for, guarding against invalid sequence
	 * numbers, exhausted retry budgets and duplicate SREJs (the
	 * CONN_SREJ_ACT / srej_save_reqseq pair suppresses a second
	 * retransmission of the same frame after a poll exchange).
	 */
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* An SREJ for the frame we are about to send next is bogus */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx of zero means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		/* The poll demands a final bit on our reply */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this final frame answers
			 * an SREJ we already acted on for the same seq.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
6176 
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	/* Handle an incoming REJ S-frame: retransmit everything from
	 * reqseq onward.  CONN_REJ_ACT suppresses a duplicate bulk
	 * retransmission when the final bit arrives for a REJ we
	 * already acted on.
	 */
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A REJ for the frame we are about to send next is bogus */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx of zero means unlimited retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
6213 
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	/* Classify a received I-frame's sequence number relative to the
	 * receive window: expected, duplicate, unexpected (gap), or
	 * invalid -- with SREJ-specific variants while in SREJ_SENT
	 * state.  Returns one of the L2CAP_TXSEQ_* classifications
	 * consumed by the rx state machines.
	 */
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6299 
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	/* ERTM receive state machine, normal RECV state.  Consumes one
	 * rx event (I-frame or S-frame).  skb_in_use tracks whether
	 * ownership of skb was handed off (reassembly or srej_q); if
	 * not, the skb is freed on exit.
	 */
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			/* A final bit without a pending REJ means a poll
			 * exchange completed: retransmit unacked frames.
			 */
			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received; only the ack info is useful */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		/* NOTE(review): the NULL check on control is inconsistent
		 * with the dereferences above -- control cannot be NULL
		 * on this path.
		 */
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	/* skb ownership was not transferred anywhere: drop it */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6433 
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	/* ERTM receive state machine, SREJ_SENT state: we have asked
	 * the peer to selectively retransmit missing frames.  Incoming
	 * I-frames are parked in srej_q until the gaps are filled
	 * (l2cap_rx_queued_iframes() drains the queue and returns us to
	 * RECV).  skb_in_use tracks skb ownership handoff as in
	 * l2cap_rx_state_recv().
	 */
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* Head of the SREJ list arrived: pop it and try
			 * to deliver everything that is now in order.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Poll while SREJs are outstanding: re-request the
			 * tail of the SREJ list with the F bit set.
			 */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	/* skb ownership was not transferred anywhere: drop it */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6576 
6577 static int l2cap_finish_move(struct l2cap_chan *chan)
6578 {
6579 	BT_DBG("chan %p", chan);
6580 
6581 	chan->rx_state = L2CAP_RX_STATE_RECV;
6582 
6583 	if (chan->hs_hcon)
6584 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6585 	else
6586 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6587 
6588 	return l2cap_resegment(chan);
6589 }
6590 
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	/* WAIT_P state (channel move): we are waiting for the peer's
	 * poll (P=1) S-frame.  On receipt, resynchronize the transmit
	 * side to the peer's reqseq, finish the move, answer the poll
	 * with an F-bit frame, and process the event in RECV state.
	 */
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	/* Only a poll frame is acceptable in this state */
	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* An I-frame carrying the poll is a protocol violation here */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6628 
6629 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6630 				 struct l2cap_ctrl *control,
6631 				 struct sk_buff *skb, u8 event)
6632 {
6633 	int err;
6634 
6635 	if (!control->final)
6636 		return -EPROTO;
6637 
6638 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6639 
6640 	chan->rx_state = L2CAP_RX_STATE_RECV;
6641 	l2cap_process_reqseq(chan, control->reqseq);
6642 
6643 	if (!skb_queue_empty(&chan->tx_q))
6644 		chan->tx_send_head = skb_peek(&chan->tx_q);
6645 	else
6646 		chan->tx_send_head = NULL;
6647 
6648 	/* Rewind next_tx_seq to the point expected
6649 	 * by the receiver.
6650 	 */
6651 	chan->next_tx_seq = control->reqseq;
6652 	chan->unacked_frames = 0;
6653 
6654 	if (chan->hs_hcon)
6655 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6656 	else
6657 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6658 
6659 	err = l2cap_resegment(chan);
6660 
6661 	if (!err)
6662 		err = l2cap_rx_state_recv(chan, control, skb, event);
6663 
6664 	return err;
6665 }
6666 
6667 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6668 {
6669 	/* Make sure reqseq is for a packet that has been sent but not acked */
6670 	u16 unacked;
6671 
6672 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6673 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6674 }
6675 
6676 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6677 		    struct sk_buff *skb, u8 event)
6678 {
6679 	int err = 0;
6680 
6681 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6682 	       control, skb, event, chan->rx_state);
6683 
6684 	if (__valid_reqseq(chan, control->reqseq)) {
6685 		switch (chan->rx_state) {
6686 		case L2CAP_RX_STATE_RECV:
6687 			err = l2cap_rx_state_recv(chan, control, skb, event);
6688 			break;
6689 		case L2CAP_RX_STATE_SREJ_SENT:
6690 			err = l2cap_rx_state_srej_sent(chan, control, skb,
6691 						       event);
6692 			break;
6693 		case L2CAP_RX_STATE_WAIT_P:
6694 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
6695 			break;
6696 		case L2CAP_RX_STATE_WAIT_F:
6697 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
6698 			break;
6699 		default:
6700 			/* shut it down */
6701 			break;
6702 		}
6703 	} else {
6704 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6705 		       control->reqseq, chan->next_tx_seq,
6706 		       chan->expected_ack_seq);
6707 		l2cap_send_disconn_req(chan, ECONNRESET);
6708 	}
6709 
6710 	return err;
6711 }
6712 
/* Receive an I-frame on a streaming-mode channel.  Only the next
 * expected txseq is reassembled; any other classification aborts a
 * partially reassembled SDU and frees the frame (streaming mode has
 * no retransmission).  The sequence bookkeeping (last_acked_seq,
 * expected_tx_seq) is advanced unconditionally from the received
 * txseq.  Always returns 0.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Unexpected txseq: drop any SDU under reassembly */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return err;
}
6750 
/* Receive path for ERTM and streaming-mode channels.  Unpacks the
 * control field, validates FCS and payload length, then routes
 * I-frames into the RX state machine (or the streaming receiver) and
 * S-frames into the matching RR/REJ/RNR/SREJ event.  Protocol
 * violations trigger a disconnect request; the frame is dropped.
 * Always returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* The SDU length field is only present in a START segment */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	/* Payload must fit within the negotiated MPS */
	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the 2-bit S-frame function field to an RX event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6838 
6839 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6840 {
6841 	struct l2cap_conn *conn = chan->conn;
6842 	struct l2cap_le_credits pkt;
6843 	u16 return_credits;
6844 
6845 	/* We return more credits to the sender only after the amount of
6846 	 * credits falls below half of the initial amount.
6847 	 */
6848 	if (chan->rx_credits >= (le_max_credits + 1) / 2)
6849 		return;
6850 
6851 	return_credits = le_max_credits - chan->rx_credits;
6852 
6853 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6854 
6855 	chan->rx_credits += return_credits;
6856 
6857 	pkt.cid     = cpu_to_le16(chan->scid);
6858 	pkt.credits = cpu_to_le16(return_credits);
6859 
6860 	chan->ident = l2cap_get_ident(conn);
6861 
6862 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6863 }
6864 
/* Receive path for LE flow-control (credit based) channels.  Each
 * PDU consumes one RX credit; credits are replenished via
 * l2cap_chan_le_send_credits().  The first PDU of an SDU carries a
 * 16-bit SDU length and starts reassembly in chan->sdu; follow-up
 * PDUs are appended until sdu_len bytes have arrived, then the
 * complete SDU is handed to chan->ops->recv().
 *
 * Returns -ENOBUFS without consuming the skb when out of credits or
 * the PDU exceeds the MTU; an unsegmented SDU is passed straight to
 * chan->ops->recv() and its result returned.  Every other path frees
 * the skb internally and returns 0 (see comment at the end).
 */
static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First PDU of an SDU: begins with the SDU length field */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Unsegmented SDU: deliver directly */
		if (skb->len == sdu_len)
			return chan->ops->recv(chan, skb);

		/* Segmented SDU: keep the skb and wait for more PDUs */
		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	/* The skb now belongs to the SDU under reassembly */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = chan->ops->recv(chan, chan->sdu);
		if (!err) {
			/* recv() took ownership of the complete SDU */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6953 
/* Deliver a frame received on a data channel identified by cid.
 * NOTE(review): the unlock at "done" pairs with the lock taken either
 * by l2cap_get_chan_by_scid() or explicitly in the A2MP path below —
 * so l2cap_get_chan_by_scid() presumably returns the channel locked;
 * verify against its definition.  An unknown CID is only accepted for
 * A2MP, where a channel may be created on demand; anything else is
 * dropped.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			/* Lock manually so the unlock at "done" pairs up */
			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Negative return means the skb was not consumed */
		if (l2cap_le_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
7018 
7019 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
7020 				  struct sk_buff *skb)
7021 {
7022 	struct hci_conn *hcon = conn->hcon;
7023 	struct l2cap_chan *chan;
7024 
7025 	if (hcon->type != ACL_LINK)
7026 		goto drop;
7027 
7028 	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
7029 					ACL_LINK);
7030 	if (!chan)
7031 		goto drop;
7032 
7033 	BT_DBG("chan %p, len %d", chan, skb->len);
7034 
7035 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
7036 		goto drop;
7037 
7038 	if (chan->imtu < skb->len)
7039 		goto drop;
7040 
7041 	/* Store remote BD_ADDR and PSM for msg_name */
7042 	bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
7043 	bt_cb(skb)->psm = psm;
7044 
7045 	if (!chan->ops->recv(chan, skb))
7046 		return;
7047 
7048 drop:
7049 	kfree_skb(skb);
7050 }
7051 
7052 static void l2cap_att_channel(struct l2cap_conn *conn,
7053 			      struct sk_buff *skb)
7054 {
7055 	struct hci_conn *hcon = conn->hcon;
7056 	struct l2cap_chan *chan;
7057 
7058 	if (hcon->type != LE_LINK)
7059 		goto drop;
7060 
7061 	chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
7062 					 &hcon->src, &hcon->dst);
7063 	if (!chan)
7064 		goto drop;
7065 
7066 	BT_DBG("chan %p, len %d", chan, skb->len);
7067 
7068 	if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
7069 		goto drop;
7070 
7071 	if (chan->imtu < skb->len)
7072 		goto drop;
7073 
7074 	if (!chan->ops->recv(chan, skb))
7075 		return;
7076 
7077 drop:
7078 	kfree_skb(skb);
7079 }
7080 
7081 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7082 {
7083 	struct l2cap_hdr *lh = (void *) skb->data;
7084 	u16 cid, len;
7085 	__le16 psm;
7086 
7087 	skb_pull(skb, L2CAP_HDR_SIZE);
7088 	cid = __le16_to_cpu(lh->cid);
7089 	len = __le16_to_cpu(lh->len);
7090 
7091 	if (len != skb->len) {
7092 		kfree_skb(skb);
7093 		return;
7094 	}
7095 
7096 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
7097 
7098 	switch (cid) {
7099 	case L2CAP_CID_SIGNALING:
7100 		l2cap_sig_channel(conn, skb);
7101 		break;
7102 
7103 	case L2CAP_CID_CONN_LESS:
7104 		psm = get_unaligned((__le16 *) skb->data);
7105 		skb_pull(skb, L2CAP_PSMLEN_SIZE);
7106 		l2cap_conless_channel(conn, psm, skb);
7107 		break;
7108 
7109 	case L2CAP_CID_ATT:
7110 		l2cap_att_channel(conn, skb);
7111 		break;
7112 
7113 	case L2CAP_CID_LE_SIGNALING:
7114 		l2cap_le_sig_channel(conn, skb);
7115 		break;
7116 
7117 	case L2CAP_CID_SMP:
7118 		if (smp_sig_channel(conn, skb))
7119 			l2cap_conn_del(conn->hcon, EACCES);
7120 		break;
7121 
7122 	default:
7123 		l2cap_data_channel(conn, cid, skb);
7124 		break;
7125 	}
7126 }
7127 
7128 /* ---- L2CAP interface with lower layer (HCI) ---- */
7129 
7130 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7131 {
7132 	int exact = 0, lm1 = 0, lm2 = 0;
7133 	struct l2cap_chan *c;
7134 
7135 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7136 
7137 	/* Find listening sockets and check their link_mode */
7138 	read_lock(&chan_list_lock);
7139 	list_for_each_entry(c, &chan_list, global_l) {
7140 		if (c->state != BT_LISTEN)
7141 			continue;
7142 
7143 		if (!bacmp(&c->src, &hdev->bdaddr)) {
7144 			lm1 |= HCI_LM_ACCEPT;
7145 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7146 				lm1 |= HCI_LM_MASTER;
7147 			exact++;
7148 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
7149 			lm2 |= HCI_LM_ACCEPT;
7150 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7151 				lm2 |= HCI_LM_MASTER;
7152 		}
7153 	}
7154 	read_unlock(&chan_list_lock);
7155 
7156 	return exact ? lm1 : lm2;
7157 }
7158 
7159 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7160 {
7161 	struct l2cap_conn *conn;
7162 
7163 	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7164 
7165 	if (!status) {
7166 		conn = l2cap_conn_add(hcon);
7167 		if (conn)
7168 			l2cap_conn_ready(conn);
7169 	} else {
7170 		l2cap_conn_del(hcon, bt_to_errno(status));
7171 	}
7172 }
7173 
7174 int l2cap_disconn_ind(struct hci_conn *hcon)
7175 {
7176 	struct l2cap_conn *conn = hcon->l2cap_data;
7177 
7178 	BT_DBG("hcon %p", hcon);
7179 
7180 	if (!conn)
7181 		return HCI_ERROR_REMOTE_USER_TERM;
7182 	return conn->disc_reason;
7183 }
7184 
7185 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7186 {
7187 	BT_DBG("hcon %p reason %d", hcon, reason);
7188 
7189 	l2cap_conn_del(hcon, bt_to_errno(reason));
7190 }
7191 
7192 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7193 {
7194 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7195 		return;
7196 
7197 	if (encrypt == 0x00) {
7198 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
7199 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7200 		} else if (chan->sec_level == BT_SECURITY_HIGH)
7201 			l2cap_chan_close(chan, ECONNREFUSED);
7202 	} else {
7203 		if (chan->sec_level == BT_SECURITY_MEDIUM)
7204 			__clear_chan_timer(chan);
7205 	}
7206 }
7207 
7208 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7209 {
7210 	struct l2cap_conn *conn = hcon->l2cap_data;
7211 	struct l2cap_chan *chan;
7212 
7213 	if (!conn)
7214 		return 0;
7215 
7216 	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7217 
7218 	if (hcon->type == LE_LINK) {
7219 		if (!status && encrypt)
7220 			smp_distribute_keys(conn, 0);
7221 		cancel_delayed_work(&conn->security_timer);
7222 	}
7223 
7224 	mutex_lock(&conn->chan_lock);
7225 
7226 	list_for_each_entry(chan, &conn->chan_l, list) {
7227 		l2cap_chan_lock(chan);
7228 
7229 		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7230 		       state_to_string(chan->state));
7231 
7232 		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
7233 			l2cap_chan_unlock(chan);
7234 			continue;
7235 		}
7236 
7237 		if (chan->scid == L2CAP_CID_ATT) {
7238 			if (!status && encrypt) {
7239 				chan->sec_level = hcon->sec_level;
7240 				l2cap_chan_ready(chan);
7241 			}
7242 
7243 			l2cap_chan_unlock(chan);
7244 			continue;
7245 		}
7246 
7247 		if (!__l2cap_no_conn_pending(chan)) {
7248 			l2cap_chan_unlock(chan);
7249 			continue;
7250 		}
7251 
7252 		if (!status && (chan->state == BT_CONNECTED ||
7253 				chan->state == BT_CONFIG)) {
7254 			chan->ops->resume(chan);
7255 			l2cap_check_encryption(chan, encrypt);
7256 			l2cap_chan_unlock(chan);
7257 			continue;
7258 		}
7259 
7260 		if (chan->state == BT_CONNECT) {
7261 			if (!status)
7262 				l2cap_start_connection(chan);
7263 			else
7264 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7265 		} else if (chan->state == BT_CONNECT2) {
7266 			struct l2cap_conn_rsp rsp;
7267 			__u16 res, stat;
7268 
7269 			if (!status) {
7270 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
7271 					res = L2CAP_CR_PEND;
7272 					stat = L2CAP_CS_AUTHOR_PEND;
7273 					chan->ops->defer(chan);
7274 				} else {
7275 					l2cap_state_change(chan, BT_CONFIG);
7276 					res = L2CAP_CR_SUCCESS;
7277 					stat = L2CAP_CS_NO_INFO;
7278 				}
7279 			} else {
7280 				l2cap_state_change(chan, BT_DISCONN);
7281 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7282 				res = L2CAP_CR_SEC_BLOCK;
7283 				stat = L2CAP_CS_NO_INFO;
7284 			}
7285 
7286 			rsp.scid   = cpu_to_le16(chan->dcid);
7287 			rsp.dcid   = cpu_to_le16(chan->scid);
7288 			rsp.result = cpu_to_le16(res);
7289 			rsp.status = cpu_to_le16(stat);
7290 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
7291 				       sizeof(rsp), &rsp);
7292 
7293 			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7294 			    res == L2CAP_CR_SUCCESS) {
7295 				char buf[128];
7296 				set_bit(CONF_REQ_SENT, &chan->conf_state);
7297 				l2cap_send_cmd(conn, l2cap_get_ident(conn),
7298 					       L2CAP_CONF_REQ,
7299 					       l2cap_build_conf_req(chan, buf),
7300 					       buf);
7301 				chan->num_conf_req++;
7302 			}
7303 		}
7304 
7305 		l2cap_chan_unlock(chan);
7306 	}
7307 
7308 	mutex_unlock(&conn->chan_lock);
7309 
7310 	return 0;
7311 }
7312 
7313 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7314 {
7315 	struct l2cap_conn *conn = hcon->l2cap_data;
7316 	struct l2cap_hdr *hdr;
7317 	int len;
7318 
7319 	/* For AMP controller do not create l2cap conn */
7320 	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
7321 		goto drop;
7322 
7323 	if (!conn)
7324 		conn = l2cap_conn_add(hcon);
7325 
7326 	if (!conn)
7327 		goto drop;
7328 
7329 	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7330 
7331 	switch (flags) {
7332 	case ACL_START:
7333 	case ACL_START_NO_FLUSH:
7334 	case ACL_COMPLETE:
7335 		if (conn->rx_len) {
7336 			BT_ERR("Unexpected start frame (len %d)", skb->len);
7337 			kfree_skb(conn->rx_skb);
7338 			conn->rx_skb = NULL;
7339 			conn->rx_len = 0;
7340 			l2cap_conn_unreliable(conn, ECOMM);
7341 		}
7342 
7343 		/* Start fragment always begin with Basic L2CAP header */
7344 		if (skb->len < L2CAP_HDR_SIZE) {
7345 			BT_ERR("Frame is too short (len %d)", skb->len);
7346 			l2cap_conn_unreliable(conn, ECOMM);
7347 			goto drop;
7348 		}
7349 
7350 		hdr = (struct l2cap_hdr *) skb->data;
7351 		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7352 
7353 		if (len == skb->len) {
7354 			/* Complete frame received */
7355 			l2cap_recv_frame(conn, skb);
7356 			return 0;
7357 		}
7358 
7359 		BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7360 
7361 		if (skb->len > len) {
7362 			BT_ERR("Frame is too long (len %d, expected len %d)",
7363 			       skb->len, len);
7364 			l2cap_conn_unreliable(conn, ECOMM);
7365 			goto drop;
7366 		}
7367 
7368 		/* Allocate skb for the complete frame (with header) */
7369 		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7370 		if (!conn->rx_skb)
7371 			goto drop;
7372 
7373 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7374 					  skb->len);
7375 		conn->rx_len = len - skb->len;
7376 		break;
7377 
7378 	case ACL_CONT:
7379 		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7380 
7381 		if (!conn->rx_len) {
7382 			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7383 			l2cap_conn_unreliable(conn, ECOMM);
7384 			goto drop;
7385 		}
7386 
7387 		if (skb->len > conn->rx_len) {
7388 			BT_ERR("Fragment is too long (len %d, expected %d)",
7389 			       skb->len, conn->rx_len);
7390 			kfree_skb(conn->rx_skb);
7391 			conn->rx_skb = NULL;
7392 			conn->rx_len = 0;
7393 			l2cap_conn_unreliable(conn, ECOMM);
7394 			goto drop;
7395 		}
7396 
7397 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7398 					  skb->len);
7399 		conn->rx_len -= skb->len;
7400 
7401 		if (!conn->rx_len) {
7402 			/* Complete frame received. l2cap_recv_frame
7403 			 * takes ownership of the skb so set the global
7404 			 * rx_skb pointer to NULL first.
7405 			 */
7406 			struct sk_buff *rx_skb = conn->rx_skb;
7407 			conn->rx_skb = NULL;
7408 			l2cap_recv_frame(conn, rx_skb);
7409 		}
7410 		break;
7411 	}
7412 
7413 drop:
7414 	kfree_skb(skb);
7415 	return 0;
7416 }
7417 
7418 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7419 {
7420 	struct l2cap_chan *c;
7421 
7422 	read_lock(&chan_list_lock);
7423 
7424 	list_for_each_entry(c, &chan_list, global_l) {
7425 		seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7426 			   &c->src, &c->dst,
7427 			   c->state, __le16_to_cpu(c->psm),
7428 			   c->scid, c->dcid, c->imtu, c->omtu,
7429 			   c->sec_level, c->mode);
7430 	}
7431 
7432 	read_unlock(&chan_list_lock);
7433 
7434 	return 0;
7435 }
7436 
/* debugfs open: bind the seq_file show routine to this file */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
7441 
/* File operations for the read-only "l2cap" debugfs channel list */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
7448 
7449 static struct dentry *l2cap_debugfs;
7450 
7451 int __init l2cap_init(void)
7452 {
7453 	int err;
7454 
7455 	err = l2cap_init_sockets();
7456 	if (err < 0)
7457 		return err;
7458 
7459 	if (IS_ERR_OR_NULL(bt_debugfs))
7460 		return 0;
7461 
7462 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7463 					    NULL, &l2cap_debugfs_fops);
7464 
7465 	debugfs_create_u16("l2cap_le_max_credits", 0466, bt_debugfs,
7466 			   &le_max_credits);
7467 	debugfs_create_u16("l2cap_le_default_mps", 0466, bt_debugfs,
7468 			   &le_default_mps);
7469 
7470 	return 0;
7471 }
7472 
/* Tear down the L2CAP layer: remove the debugfs entry and unregister
 * the socket interface.
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
7478 
7479 module_param(disable_ertm, bool, 0644);
7480 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
7481