xref: /linux/net/bluetooth/l2cap_core.c (revision 988b0c541ed8b1c633c4d4df7169010635942e18)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 
40 #include "smp.h"
41 #include "a2mp.h"
42 #include "amp.h"
43 #include "6lowpan.h"
44 
/* Upper bound for credits granted on LE flow-control channels */
#define LE_FLOWCTL_MAX_CREDITS 65535

/* Module parameter: when set, ERTM mode is never offered/accepted */
bool disable_ertm;

/* Feature mask reported in information responses */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
/* Fixed-channel bitmap: signalling and connectionless reception */
static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };

/* Global list of every L2CAP channel, protected by chan_list_lock */
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

/* Defaults applied to newly created LE flow-control channels */
static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;

/* Forward declarations for signalling helpers defined later in this
 * file but used earlier.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				       u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event);
67 
68 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
69 {
70 	if (hcon->type == LE_LINK) {
71 		if (type == ADDR_LE_DEV_PUBLIC)
72 			return BDADDR_LE_PUBLIC;
73 		else
74 			return BDADDR_LE_RANDOM;
75 	}
76 
77 	return BDADDR_BREDR;
78 }
79 
80 /* ---- L2CAP channels ---- */
81 
/* Find the channel on @conn whose destination (remote) CID is @cid.
 * Callers take conn->chan_lock (see the locked wrappers below).
 * Returns NULL when no channel matches.
 */
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->dcid == cid)
			return c;
	}
	return NULL;
}
93 
/* Find the channel on @conn whose source (local) CID is @cid.
 * Callers take conn->chan_lock (see the locked wrappers below).
 * Returns NULL when no channel matches.
 */
static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->scid == cid)
			return c;
	}
	return NULL;
}
105 
/* Find channel with given SCID.
 * Returns locked channel. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	/* NOTE(review): the channel is locked but no reference is taken
	 * here; confirm callers cannot race with the channel being freed
	 * after chan_lock is dropped.
	 */
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
121 
/* Find channel with given DCID.
 * Returns locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	/* NOTE(review): locked but not referenced -- same caveat as
	 * l2cap_get_chan_by_scid().
	 */
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
138 
/* Find the channel on @conn whose outstanding signalling command uses
 * identifier @ident.  Callers take conn->chan_lock.  Returns NULL when
 * no channel matches.
 */
static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						    u8 ident)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->ident == ident)
			return c;
	}
	return NULL;
}
150 
/* Find the channel matching signalling identifier @ident.
 * Returns the channel locked, or NULL.
 */
static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						  u8 ident)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
164 
/* Look up a channel in the global list bound to source address @src
 * with source port @psm.  Caller holds chan_list_lock.
 */
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		if (c->sport == psm && !bacmp(&c->src, src))
			return c;
	}
	return NULL;
}
175 
176 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
177 {
178 	int err;
179 
180 	write_lock(&chan_list_lock);
181 
182 	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
183 		err = -EADDRINUSE;
184 		goto done;
185 	}
186 
187 	if (psm) {
188 		chan->psm = psm;
189 		chan->sport = psm;
190 		err = 0;
191 	} else {
192 		u16 p;
193 
194 		err = -EINVAL;
195 		for (p = 0x1001; p < 0x1100; p += 2)
196 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
197 				chan->psm   = cpu_to_le16(p);
198 				chan->sport = cpu_to_le16(p);
199 				err = 0;
200 				break;
201 			}
202 	}
203 
204 done:
205 	write_unlock(&chan_list_lock);
206 	return err;
207 }
208 
209 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
210 {
211 	write_lock(&chan_list_lock);
212 
213 	chan->scid = scid;
214 
215 	write_unlock(&chan_list_lock);
216 
217 	return 0;
218 }
219 
220 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
221 {
222 	u16 cid, dyn_end;
223 
224 	if (conn->hcon->type == LE_LINK)
225 		dyn_end = L2CAP_CID_LE_DYN_END;
226 	else
227 		dyn_end = L2CAP_CID_DYN_END;
228 
229 	for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
230 		if (!__l2cap_get_chan_by_scid(conn, cid))
231 			return cid;
232 	}
233 
234 	return 0;
235 }
236 
/* Move @chan to @state and notify the channel owner through its
 * state_change callback (with no error).
 */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
245 
/* Like l2cap_state_change() but also propagates @err to the owner */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}
252 
/* Report @err to the channel owner without changing the state */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
257 
/* Arm the ERTM retransmission timer.  Deliberately a no-op while the
 * monitor timer is pending (only one of the two runs at a time) or
 * when no retransmission timeout has been configured.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
266 
/* Arm the ERTM monitor timer, first cancelling the retransmission
 * timer -- the two are mutually exclusive (see __set_retrans_timer()).
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
275 
/* Return the skb in @head whose ERTM txseq equals @seq, or NULL when
 * no queued frame carries that sequence number.
 */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->control.txseq == seq)
			return skb;
	}

	return NULL;
}
288 
289 /* ---- L2CAP sequence number lists ---- */
290 
291 /* For ERTM, ordered lists of sequence numbers must be tracked for
292  * SREJ requests that are received and for frames that are to be
293  * retransmitted. These seq_list functions implement a singly-linked
294  * list in an array, where membership in the list can also be checked
295  * in constant time. Items can also be added to the tail of the list
296  * and removed from the head in constant time, without further memory
297  * allocs or frees.
298  */
299 
300 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
301 {
302 	size_t alloc_size, i;
303 
304 	/* Allocated size is a power of 2 to map sequence numbers
305 	 * (which may be up to 14 bits) in to a smaller array that is
306 	 * sized for the negotiated ERTM transmit windows.
307 	 */
308 	alloc_size = roundup_pow_of_two(size);
309 
310 	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
311 	if (!seq_list->list)
312 		return -ENOMEM;
313 
314 	seq_list->mask = alloc_size - 1;
315 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
316 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
317 	for (i = 0; i < alloc_size; i++)
318 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
319 
320 	return 0;
321 }
322 
/* Release the backing array allocated by l2cap_seq_list_init() */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
327 
/* Test whether @seq is currently a member of @seq_list */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
334 
/* Remove and return the sequence number at the head of @seq_list.
 * Callers are expected to check for a non-empty list first; popping an
 * empty list would read the slot indexed by L2CAP_SEQ_LIST_CLEAR.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the next linked entry and clear this slot */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	/* The tail entry was popped: mark the whole list empty */
	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
350 
351 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
352 {
353 	u16 i;
354 
355 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
356 		return;
357 
358 	for (i = 0; i <= seq_list->mask; i++)
359 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
360 
361 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
362 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
363 }
364 
/* Append @seq to the tail of @seq_list.  Duplicate appends are
 * silently ignored (the slot already links into the list).
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	/* First entry becomes the head; otherwise link from the old tail */
	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
382 
/* Delayed-work handler for the channel timer: close the channel with
 * an error derived from its current state.  Locking order is
 * conn->chan_lock first, then the channel lock, as elsewhere in this
 * file.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	/* ops->close runs without the channel lock held */
	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	/* Drop the reference presumably taken when the timer was armed
	 * (see l2cap_set_timer) -- confirm against the timer helpers.
	 */
	l2cap_chan_put(chan);
}
412 
/* Allocate and initialise a new channel and add it to the global
 * channel list.  The channel starts in BT_OPEN with one reference
 * held.  Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	/* NOTE(review): GFP_ATOMIC is used here -- confirm whether any
	 * caller actually runs in atomic context.
	 */
	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
440 
/* kref release callback: unlink the channel from the global list and
 * free it.  Invoked from l2cap_chan_put() on the final reference drop.
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
453 
/* Take an additional reference on channel @c */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}
460 
/* Drop a reference on channel @c, freeing it via l2cap_chan_destroy()
 * when this was the last one.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
467 
468 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
469 {
470 	chan->fcs  = L2CAP_FCS_CRC16;
471 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
472 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
473 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
474 	chan->remote_max_tx = chan->max_tx;
475 	chan->remote_tx_win = chan->tx_win;
476 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
477 	chan->sec_level = BT_SECURITY_LOW;
478 	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
479 	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
480 	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
481 	chan->conf_state = 0;
482 
483 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
484 }
485 
/* Reset LE flow-control (credit-based) state on @chan: discard any
 * partial SDU reassembly state, start with zero TX credits and the
 * configured default RX credits, and bound the MPS by our receive MTU.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = 0;
	chan->rx_credits = le_max_credits;
	chan->mps = min_t(u16, chan->imtu, le_default_mps);

	skb_queue_head_init(&chan->tx_q);
}
497 
/* Attach @chan to @conn: assign CIDs and default MTUs according to the
 * channel type, seed the default EFS parameters, and take the channel
 * and hci_conn references that l2cap_chan_del() later drops.  Caller
 * holds conn->chan_lock (see l2cap_chan_add()).
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default extended flow spec (best-effort) parameters */
	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
546 
/* Locked wrapper: attach @chan to @conn under conn->chan_lock */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
553 
554 void l2cap_chan_del(struct l2cap_chan *chan, int err)
555 {
556 	struct l2cap_conn *conn = chan->conn;
557 
558 	__clear_chan_timer(chan);
559 
560 	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
561 
562 	if (conn) {
563 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
564 		/* Delete from channel list */
565 		list_del(&chan->list);
566 
567 		l2cap_chan_put(chan);
568 
569 		chan->conn = NULL;
570 
571 		if (chan->scid != L2CAP_CID_A2MP)
572 			hci_conn_drop(conn->hcon);
573 
574 		if (mgr && mgr->bredr_chan == chan)
575 			mgr->bredr_chan = NULL;
576 	}
577 
578 	if (chan->hs_hchan) {
579 		struct hci_chan *hs_hchan = chan->hs_hchan;
580 
581 		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
582 		amp_disconnect_logical_link(hs_hchan);
583 	}
584 
585 	chan->ops->teardown(chan, err);
586 
587 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
588 		return;
589 
590 	switch(chan->mode) {
591 	case L2CAP_MODE_BASIC:
592 		break;
593 
594 	case L2CAP_MODE_LE_FLOWCTL:
595 		skb_queue_purge(&chan->tx_q);
596 		break;
597 
598 	case L2CAP_MODE_ERTM:
599 		__clear_retrans_timer(chan);
600 		__clear_monitor_timer(chan);
601 		__clear_ack_timer(chan);
602 
603 		skb_queue_purge(&chan->srej_q);
604 
605 		l2cap_seq_list_free(&chan->srej_list);
606 		l2cap_seq_list_free(&chan->retrans_list);
607 
608 		/* fall through */
609 
610 	case L2CAP_MODE_STREAMING:
611 		skb_queue_purge(&chan->tx_q);
612 		break;
613 	}
614 
615 	return;
616 }
617 
/* Propagate a changed connection destination address to every channel
 * on @hcon's L2CAP connection, refreshing each channel's dst/dst_type.
 */
void l2cap_conn_update_id_addr(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_type(hcon, hcon->dst_type);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
634 
/* Reject a pending LE connection request on @chan: report an
 * authorization failure when the setup was deferred to userspace,
 * otherwise a bad PSM.  Moves the channel to BT_DISCONN.
 */
static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_AUTHORIZATION;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.dcid    = cpu_to_le16(chan->scid);
	rsp.mtu     = cpu_to_le16(chan->imtu);
	rsp.mps     = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}
657 
/* Reject a pending BR/EDR connection request on @chan: report a
 * security block when the setup was deferred to userspace, otherwise a
 * bad PSM.  Moves the channel to BT_DISCONN.
 */
static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_SEC_BLOCK;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
}
678 
/* Shut down @chan according to its current state: send a disconnect
 * request for established connection-oriented channels, reject a still
 * pending incoming request, or just tear down/delete the channel.
 * Called with the channel lock held (see l2cap_chan_timeout()).
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		/* Established channels negotiate the disconnect with the
		 * peer and wait (bounded by the channel timer) for the
		 * response; everything else is deleted immediately.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* Incoming request not yet answered: send the reject
		 * appropriate to the transport first.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK)
				l2cap_chan_le_connect_reject(chan);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
720 
721 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
722 {
723 	switch (chan->chan_type) {
724 	case L2CAP_CHAN_RAW:
725 		switch (chan->sec_level) {
726 		case BT_SECURITY_HIGH:
727 		case BT_SECURITY_FIPS:
728 			return HCI_AT_DEDICATED_BONDING_MITM;
729 		case BT_SECURITY_MEDIUM:
730 			return HCI_AT_DEDICATED_BONDING;
731 		default:
732 			return HCI_AT_NO_BONDING;
733 		}
734 		break;
735 	case L2CAP_CHAN_CONN_LESS:
736 		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
737 			if (chan->sec_level == BT_SECURITY_LOW)
738 				chan->sec_level = BT_SECURITY_SDP;
739 		}
740 		if (chan->sec_level == BT_SECURITY_HIGH ||
741 		    chan->sec_level == BT_SECURITY_FIPS)
742 			return HCI_AT_NO_BONDING_MITM;
743 		else
744 			return HCI_AT_NO_BONDING;
745 		break;
746 	case L2CAP_CHAN_CONN_ORIENTED:
747 		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
748 			if (chan->sec_level == BT_SECURITY_LOW)
749 				chan->sec_level = BT_SECURITY_SDP;
750 
751 			if (chan->sec_level == BT_SECURITY_HIGH ||
752 			    chan->sec_level == BT_SECURITY_FIPS)
753 				return HCI_AT_NO_BONDING_MITM;
754 			else
755 				return HCI_AT_NO_BONDING;
756 		}
757 		/* fall through */
758 	default:
759 		switch (chan->sec_level) {
760 		case BT_SECURITY_HIGH:
761 		case BT_SECURITY_FIPS:
762 			return HCI_AT_GENERAL_BONDING_MITM;
763 		case BT_SECURITY_MEDIUM:
764 			return HCI_AT_GENERAL_BONDING;
765 		default:
766 			return HCI_AT_NO_BONDING;
767 		}
768 		break;
769 	}
770 }
771 
/* Service level security */
/* Request the link security required by @chan's security level.
 * Returns the result of smp_conn_security() (LE) or
 * hci_conn_security() (BR/EDR).
 */
int l2cap_chan_check_security(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	/* LE link security is handled entirely by SMP */
	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
}
785 
786 static u8 l2cap_get_ident(struct l2cap_conn *conn)
787 {
788 	u8 id;
789 
790 	/* Get next available identificator.
791 	 *    1 - 128 are used by kernel.
792 	 *  129 - 199 are reserved.
793 	 *  200 - 254 are used by utilities like l2ping, etc.
794 	 */
795 
796 	spin_lock(&conn->lock);
797 
798 	if (++conn->tx_ident > 128)
799 		conn->tx_ident = 1;
800 
801 	id = conn->tx_ident;
802 
803 	spin_unlock(&conn->lock);
804 
805 	return id;
806 }
807 
/* Build and transmit one signalling command on @conn.  Signalling PDUs
 * go out at maximum priority and, when the controller supports it, as
 * non-flushable.  Silently drops the command on allocation failure.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
829 
830 static bool __chan_is_moving(struct l2cap_chan *chan)
831 {
832 	return chan->move_state != L2CAP_MOVE_STABLE &&
833 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
834 }
835 
/* Hand one skb to the transport.  When the channel lives on a
 * high-speed (AMP) link and is not mid-move, the frame goes out on the
 * HS hci_chan instead of the BR/EDR ACL.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			/* HS connection without an HS channel: drop */
			kfree_skb(skb);

		return;
	}

	/* Non-flushable only if the channel did not ask for flushable
	 * and the controller supports it.
	 */
	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
862 
/* Decode a 16-bit enhanced control field into @control, zeroing the
 * fields that do not apply to the decoded frame type.
 */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
886 
/* Decode a 32-bit extended control field into @control, zeroing the
 * fields that do not apply to the decoded frame type.
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
910 
/* Strip the control field from the front of @skb and decode it into
 * the skb control block, choosing the extended layout when
 * FLAG_EXT_CTRL is set on the channel.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
924 
/* Encode @control into a 32-bit extended control field (inverse of
 * __unpack_extended_control()).
 */
static u32 __pack_extended_control(struct l2cap_ctrl *control)
{
	u32 packed;

	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
943 
/* Encode @control into a 16-bit enhanced control field (inverse of
 * __unpack_enhanced_control()).
 */
static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
{
	u16 packed;

	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
		packed |= L2CAP_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
962 
963 static inline void __pack_control(struct l2cap_chan *chan,
964 				  struct l2cap_ctrl *control,
965 				  struct sk_buff *skb)
966 {
967 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
968 		put_unaligned_le32(__pack_extended_control(control),
969 				   skb->data + L2CAP_HDR_SIZE);
970 	} else {
971 		put_unaligned_le16(__pack_enhanced_control(control),
972 				   skb->data + L2CAP_HDR_SIZE);
973 	}
974 }
975 
976 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
977 {
978 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
979 		return L2CAP_EXT_HDR_SIZE;
980 	else
981 		return L2CAP_ENH_HDR_SIZE;
982 }
983 
/* Allocate and build a complete S-frame PDU carrying the packed
 * control word @control, appending an FCS when CRC16 checking is
 * enabled on the channel.  Returns ERR_PTR(-ENOMEM) on allocation
 * failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	/* FCS covers everything built so far (header + control) */
	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1016 
/* Encode @control as an S-frame and transmit it, maintaining the
 * F-bit, RNR-sent and acknowledgment bookkeeping associated with each
 * supervisory frame.  Silently returns for non-S-frames, mid-move
 * channels, or PDU allocation failure.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	/* A pending F-bit is carried by the next non-poll frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* Any frame except SREJ acknowledges up to reqseq, so the
	 * pending ack timer can be cancelled.
	 */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1057 
1058 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1059 {
1060 	struct l2cap_ctrl control;
1061 
1062 	BT_DBG("chan %p, poll %d", chan, poll);
1063 
1064 	memset(&control, 0, sizeof(control));
1065 	control.sframe = 1;
1066 	control.poll = poll;
1067 
1068 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1069 		control.super = L2CAP_SUPER_RNR;
1070 	else
1071 		control.super = L2CAP_SUPER_RR;
1072 
1073 	control.reqseq = chan->buffer_seq;
1074 	l2cap_send_sframe(chan, &control);
1075 }
1076 
1077 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1078 {
1079 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1080 }
1081 
/* Decide whether @chan may be created on or moved to an AMP
 * controller: high speed must be enabled for the connection, the peer
 * must support the A2MP fixed channel, at least one non-BR/EDR AMP
 * controller must be up, and the channel policy must prefer AMP.
 */
static bool __amp_capable(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct hci_dev *hdev;
	bool amp_available = false;

	if (!conn->hs_enabled)
		return false;

	if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
		return false;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (hdev->amp_type != AMP_TYPE_BREDR &&
		    test_bit(HCI_UP, &hdev->flags)) {
			amp_available = true;
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
		return amp_available;

	return false;
}
1109 
/* Placeholder for extended flow spec validation: currently every EFS
 * configuration is accepted.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
1115 
/* Send an L2CAP Connection Request for @chan and mark the connect as
 * pending until the response arrives (CONF_CONNECT_PEND).
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1130 
/* Send a Create Channel Request for @chan targeting AMP controller
 * @amp_id.
 */
static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
{
	struct l2cap_create_chan_req req;
	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;
	req.amp_id = amp_id;

	chan->ident = l2cap_get_ident(chan->conn);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
		       sizeof(req), &req);
}
1143 
/* Prepare an ERTM channel for a move to another controller: stop all
 * timers, reset retransmission bookkeeping and park the TX/RX state
 * machines until the move completes.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	/* Basic/streaming channels carry no ERTM state to fix up */
	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Frames that have already been transmitted keep one retry
	 * credit; the walk stops at the first never-sent frame
	 * (retries == 0), since everything after it is unsent too.
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->control.retries)
			bt_cb(skb)->control.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Treat the remote as busy so no new I-frames go out mid-move */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1178 
1179 static void l2cap_move_done(struct l2cap_chan *chan)
1180 {
1181 	u8 move_role = chan->move_role;
1182 	BT_DBG("chan %p", chan);
1183 
1184 	chan->move_state = L2CAP_MOVE_STABLE;
1185 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1186 
1187 	if (chan->mode != L2CAP_MODE_ERTM)
1188 		return;
1189 
1190 	switch (move_role) {
1191 	case L2CAP_MOVE_ROLE_INITIATOR:
1192 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1193 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1194 		break;
1195 	case L2CAP_MOVE_ROLE_RESPONDER:
1196 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1197 		break;
1198 	}
1199 }
1200 
/* Transition a channel to BT_CONNECTED and notify its owner via the
 * ->ready() callback.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	/* An LE flow-control channel with no TX credits cannot send yet,
	 * so suspend it before declaring it connected.
	 */
	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
		chan->ops->suspend(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1214 
1215 static void l2cap_le_connect(struct l2cap_chan *chan)
1216 {
1217 	struct l2cap_conn *conn = chan->conn;
1218 	struct l2cap_le_conn_req req;
1219 
1220 	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1221 		return;
1222 
1223 	req.psm     = chan->psm;
1224 	req.scid    = cpu_to_le16(chan->scid);
1225 	req.mtu     = cpu_to_le16(chan->imtu);
1226 	req.mps     = cpu_to_le16(chan->mps);
1227 	req.credits = cpu_to_le16(chan->rx_credits);
1228 
1229 	chan->ident = l2cap_get_ident(conn);
1230 
1231 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1232 		       sizeof(req), &req);
1233 }
1234 
1235 static void l2cap_le_start(struct l2cap_chan *chan)
1236 {
1237 	struct l2cap_conn *conn = chan->conn;
1238 
1239 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1240 		return;
1241 
1242 	if (!chan->psm) {
1243 		l2cap_chan_ready(chan);
1244 		return;
1245 	}
1246 
1247 	if (chan->state == BT_CONNECT)
1248 		l2cap_le_connect(chan);
1249 }
1250 
1251 static void l2cap_start_connection(struct l2cap_chan *chan)
1252 {
1253 	if (__amp_capable(chan)) {
1254 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1255 		a2mp_discover_amp(chan);
1256 	} else if (chan->conn->hcon->type == LE_LINK) {
1257 		l2cap_le_start(chan);
1258 	} else {
1259 		l2cap_send_conn_req(chan);
1260 	}
1261 }
1262 
/* Kick off channel establishment on a ready link.  On BR/EDR the
 * remote feature mask must be known first, so an information request
 * is sent (once per connection) and setup resumes when the response
 * or the info timeout arrives.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange still in flight; l2cap_conn_start()
		 * retries this channel once it completes.
		 */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_chan_check_security(chan) &&
		    __l2cap_no_conn_pending(chan)) {
			l2cap_start_connection(chan);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Bound the wait for the info response */
		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(req), &req);
	}
}
1293 
1294 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1295 {
1296 	u32 local_feat_mask = l2cap_feat_mask;
1297 	if (!disable_ertm)
1298 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1299 
1300 	switch (mode) {
1301 	case L2CAP_MODE_ERTM:
1302 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1303 	case L2CAP_MODE_STREAMING:
1304 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1305 	default:
1306 		return 0x00;
1307 	}
1308 }
1309 
/* Initiate disconnection of a channel: stop any ERTM timers, send an
 * L2CAP Disconnection Request and move the channel to BT_DISCONN with
 * @err as the error reported to the owner.  A2MP channels have no
 * dcid/scid pair on the air, so they only change state.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1336 
1337 /* ---- L2CAP connections ---- */
/* Walk all channels on a connection and advance their setup: start
 * outgoing connects (BT_CONNECT) and answer incoming connect requests
 * (BT_CONNECT2) now that security/feature information is available.
 * Called with neither lock held; takes conn->chan_lock and each
 * channel's lock in turn.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Wait until security is satisfied and no
			 * connect request is already outstanding.
			 */
			if (!l2cap_chan_check_security(chan) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* A state-2 device cannot fall back to basic
			 * mode, so close if the remote lacks support.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Owner must accept/reject first */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only a successful, not-yet-configured channel
			 * moves on to the configuration exchange.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1413 
1414 /* Find socket with cid and source/destination bdaddr.
1415  * Returns closest match, locked.
1416  */
1417 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1418 						    bdaddr_t *src,
1419 						    bdaddr_t *dst)
1420 {
1421 	struct l2cap_chan *c, *c1 = NULL;
1422 
1423 	read_lock(&chan_list_lock);
1424 
1425 	list_for_each_entry(c, &chan_list, global_l) {
1426 		if (state && c->state != state)
1427 			continue;
1428 
1429 		if (c->scid == cid) {
1430 			int src_match, dst_match;
1431 			int src_any, dst_any;
1432 
1433 			/* Exact match. */
1434 			src_match = !bacmp(&c->src, src);
1435 			dst_match = !bacmp(&c->dst, dst);
1436 			if (src_match && dst_match) {
1437 				read_unlock(&chan_list_lock);
1438 				return c;
1439 			}
1440 
1441 			/* Closest match */
1442 			src_any = !bacmp(&c->src, BDADDR_ANY);
1443 			dst_any = !bacmp(&c->dst, BDADDR_ANY);
1444 			if ((src_match && dst_any) || (src_any && dst_match) ||
1445 			    (src_any && dst_any))
1446 				c1 = c;
1447 		}
1448 	}
1449 
1450 	read_unlock(&chan_list_lock);
1451 
1452 	return c1;
1453 }
1454 
/* An LE link came up: if a server is listening on the ATT fixed
 * channel (and no client channel already claims it), spawn a child
 * channel bound to this connection.  Called with conn->chan_lock held
 * by l2cap_conn_ready().
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan, *pchan;
	u8 dst_type;

	BT_DBG("");

	bt_6lowpan_add_conn(conn);

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
					  &hcon->src, &hcon->dst);
	if (!pchan)
		return;

	/* Client ATT sockets should override the server one */
	if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
		return;

	dst_type = bdaddr_type(hcon, hcon->dst_type);

	/* If device is blocked, do not create a channel for it */
	if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
		return;

	l2cap_chan_lock(pchan);

	/* Ask the listener to produce a child channel for this link */
	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto clean;

	bacpy(&chan->src, &hcon->src);
	bacpy(&chan->dst, &hcon->dst);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);
	chan->dst_type = dst_type;

	__l2cap_chan_add(conn, chan);

clean:
	l2cap_chan_unlock(pchan);
}
1497 
/* The underlying HCI connection is ready: start or complete setup of
 * every channel queued on it, then release any received frames that
 * were held back until now.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out && hcon->type == LE_LINK)
		smp_conn_security(hcon, hcon->pending_sec_level);

	mutex_lock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels are managed by the AMP manager, not here */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Connectionless/fixed channels need no setup */
			l2cap_chan_ready(chan);

		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	/* Process frames queued while the connection was coming up */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1541 
1542 /* Notify sockets that we cannot guaranty reliability anymore */
1543 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1544 {
1545 	struct l2cap_chan *chan;
1546 
1547 	BT_DBG("conn %p", conn);
1548 
1549 	mutex_lock(&conn->chan_lock);
1550 
1551 	list_for_each_entry(chan, &conn->chan_l, list) {
1552 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1553 			l2cap_chan_set_err(chan, err);
1554 	}
1555 
1556 	mutex_unlock(&conn->chan_lock);
1557 }
1558 
/* The information request timed out without a response: mark the
 * feature exchange as done so pending channels can proceed anyway.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1569 
1570 /*
1571  * l2cap_user
1572  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1573  * callback is called during registration. The ->remove callback is called
1574  * during unregistration.
1575  * An l2cap_user object can either be explicitly unregistered or when the
1576  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1577  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1578  * External modules must own a reference to the l2cap_conn object if they intend
1579  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1580  * any time if they don't.
1581  */
1582 
/* Register an external l2cap_user on a connection.  Returns 0 on
 * success, -EINVAL if the user is already registered, -ENODEV if the
 * connection is already being torn down, or the error from the user's
 * ->probe() callback.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* Non-NULL list pointers mean the user is already on a list */
	if (user->list.next || user->list.prev) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1620 
1621 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1622 {
1623 	struct hci_dev *hdev = conn->hcon->hdev;
1624 
1625 	hci_dev_lock(hdev);
1626 
1627 	if (!user->list.next || !user->list.prev)
1628 		goto out_unlock;
1629 
1630 	list_del(&user->list);
1631 	user->list.next = NULL;
1632 	user->list.prev = NULL;
1633 	user->remove(conn, user);
1634 
1635 out_unlock:
1636 	hci_dev_unlock(hdev);
1637 }
1638 EXPORT_SYMBOL(l2cap_unregister_user);
1639 
1640 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1641 {
1642 	struct l2cap_user *user;
1643 
1644 	while (!list_empty(&conn->users)) {
1645 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1646 		list_del(&user->list);
1647 		user->list.next = NULL;
1648 		user->list.prev = NULL;
1649 		user->remove(conn, user);
1650 	}
1651 }
1652 
/* Tear down an l2cap_conn when its HCI connection goes away: flush
 * pending RX, unregister users, close every channel with @err, and
 * drop the connection's self-reference.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	l2cap_unregister_all_users(conn);

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold a ref so the channel survives until ops->close() */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	/* Abort any SMP procedure still pending on this link */
	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
		cancel_delayed_work_sync(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	/* NULL hchan marks the conn as unregistered for new users */
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1707 
/* The SMP security procedure timed out: destroy the SMP context and
 * tear down the whole LE connection with ETIMEDOUT.
 */
static void security_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       security_timer.work);

	BT_DBG("conn %p", conn);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
		smp_chan_destroy(conn);
		l2cap_conn_del(conn->hcon, ETIMEDOUT);
	}
}
1720 
/* kref release callback: drop the hci_conn reference and free the
 * connection object.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1728 
/* Take a reference on an l2cap_conn */
void l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
}
EXPORT_SYMBOL(l2cap_conn_get);
1734 
/* Drop a reference; frees the connection when it was the last one */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1740 
1741 /* ---- Socket interface ---- */
1742 
1743 /* Find socket with psm and source / destination bdaddr.
1744  * Returns closest match.
1745  */
1746 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1747 						   bdaddr_t *src,
1748 						   bdaddr_t *dst,
1749 						   u8 link_type)
1750 {
1751 	struct l2cap_chan *c, *c1 = NULL;
1752 
1753 	read_lock(&chan_list_lock);
1754 
1755 	list_for_each_entry(c, &chan_list, global_l) {
1756 		if (state && c->state != state)
1757 			continue;
1758 
1759 		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1760 			continue;
1761 
1762 		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1763 			continue;
1764 
1765 		if (c->psm == psm) {
1766 			int src_match, dst_match;
1767 			int src_any, dst_any;
1768 
1769 			/* Exact match. */
1770 			src_match = !bacmp(&c->src, src);
1771 			dst_match = !bacmp(&c->dst, dst);
1772 			if (src_match && dst_match) {
1773 				read_unlock(&chan_list_lock);
1774 				return c;
1775 			}
1776 
1777 			/* Closest match */
1778 			src_any = !bacmp(&c->src, BDADDR_ANY);
1779 			dst_any = !bacmp(&c->dst, BDADDR_ANY);
1780 			if ((src_match && dst_any) || (src_any && dst_match) ||
1781 			    (src_any && dst_any))
1782 				c1 = c;
1783 		}
1784 	}
1785 
1786 	read_unlock(&chan_list_lock);
1787 
1788 	return c1;
1789 }
1790 
1791 static void l2cap_monitor_timeout(struct work_struct *work)
1792 {
1793 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1794 					       monitor_timer.work);
1795 
1796 	BT_DBG("chan %p", chan);
1797 
1798 	l2cap_chan_lock(chan);
1799 
1800 	if (!chan->conn) {
1801 		l2cap_chan_unlock(chan);
1802 		l2cap_chan_put(chan);
1803 		return;
1804 	}
1805 
1806 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1807 
1808 	l2cap_chan_unlock(chan);
1809 	l2cap_chan_put(chan);
1810 }
1811 
1812 static void l2cap_retrans_timeout(struct work_struct *work)
1813 {
1814 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1815 					       retrans_timer.work);
1816 
1817 	BT_DBG("chan %p", chan);
1818 
1819 	l2cap_chan_lock(chan);
1820 
1821 	if (!chan->conn) {
1822 		l2cap_chan_unlock(chan);
1823 		l2cap_chan_put(chan);
1824 		return;
1825 	}
1826 
1827 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1828 	l2cap_chan_unlock(chan);
1829 	l2cap_chan_put(chan);
1830 }
1831 
/* Transmit queued PDUs in streaming mode: frames are numbered and
 * sent immediately, with no retransmission or acknowledgement.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	/* Hold off while the channel is moving to another controller */
	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Streaming mode never acknowledges received frames */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		/* Append the FCS over header + payload if negotiated */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1870 
/* Transmit as many pending I-frames as the ERTM transmit window
 * allows.  Returns the number of frames sent, 0 when sending is not
 * currently possible, or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	/* Remote signalled RNR: hold all I-frames until it recovers */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Piggy-back an acknowledgement of everything received */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance the send pointer; NULL means queue exhausted */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1940 
/* Retransmit every I-frame whose sequence number is on the channel's
 * retrans_list.  Disconnects the channel if a frame exceeds max_tx
 * retransmission attempts.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	/* Remote signalled RNR: retransmissions must wait */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		/* Work on a local copy of the control block; only the
		 * updated header bytes are written into the clone below.
		 */
		control = bt_cb(skb)->control;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the acknowledgement carried by this frame */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Recompute the FCS since the control field changed */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2019 
/* Queue the single sequence number requested by an SREJ (control's
 * reqseq) for retransmission, then transmit it.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2028 
/* Queue every unacknowledged I-frame, starting from the peer's reqseq,
 * for retransmission (used for REJ and poll responses).
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A poll must be answered with the F-bit set on the next frame */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Skip forward to the first frame the peer asked for */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything up to (not including) the first
		 * frame that has never been transmitted.
		 */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2062 
/* Acknowledge received I-frames: send RNR when locally busy, try to
 * piggy-back the ack on outgoing I-frames, send an explicit RR once
 * the unacked window is 3/4 full, or arm the ack timer otherwise.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Below threshold: defer the ack to the timer */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2112 
/* Copy @len bytes of user data from @msg into @skb, placing the first
 * @count bytes in the skb itself and the remainder in MTU-sized
 * fragments chained on its frag_list.  Returns the number of bytes
 * copied or a negative error.  On failure the caller frees @skb,
 * which also releases any fragments already chained.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len  -= count;

		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2157 
/* Build a connectionless (G-frame) PDU: L2CAP header followed by the
 * PSM and the user payload.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
						 u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
	       __le16_to_cpu(chan->psm), len, priority);

	/* First fragment carries the header; limit it to the HCI MTU */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2192 
2193 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2194 					      struct msghdr *msg, size_t len,
2195 					      u32 priority)
2196 {
2197 	struct l2cap_conn *conn = chan->conn;
2198 	struct sk_buff *skb;
2199 	int err, count;
2200 	struct l2cap_hdr *lh;
2201 
2202 	BT_DBG("chan %p len %zu", chan, len);
2203 
2204 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2205 
2206 	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2207 				   msg->msg_flags & MSG_DONTWAIT);
2208 	if (IS_ERR(skb))
2209 		return skb;
2210 
2211 	skb->priority = priority;
2212 
2213 	/* Create L2CAP header */
2214 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2215 	lh->cid = cpu_to_le16(chan->dcid);
2216 	lh->len = cpu_to_le16(len);
2217 
2218 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2219 	if (unlikely(err < 0)) {
2220 		kfree_skb(skb);
2221 		return ERR_PTR(err);
2222 	}
2223 	return skb;
2224 }
2225 
/* Build an ERTM/streaming I-frame PDU: L2CAP header, a zero control
 * field (filled in at transmit time), an optional SDU length for the
 * first segment, and the user payload.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Header size depends on enhanced vs extended control field */
	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	/* Only the first segment of a segmented SDU carries its length */
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
2279 
/* Segment an outgoing SDU into ERTM/streaming I-frame PDUs and queue
 * them on seg_queue.  The PDU payload size is bounded by the HCI MTU,
 * the BR/EDR cap (unless on an AMP link) and the remote's MPS.
 * Returns 0 on success or a negative error, in which case seg_queue
 * has already been purged.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU: no SDU length field needed */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* The first segment carries the total SDU length, which
		 * costs L2CAP_SDULEN_SIZE bytes of payload space.
		 */
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the first PDU carries the SDU length field;
			 * subsequent segments regain those payload bytes.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2349 
2350 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2351 						   struct msghdr *msg,
2352 						   size_t len, u16 sdulen)
2353 {
2354 	struct l2cap_conn *conn = chan->conn;
2355 	struct sk_buff *skb;
2356 	int err, count, hlen;
2357 	struct l2cap_hdr *lh;
2358 
2359 	BT_DBG("chan %p len %zu", chan, len);
2360 
2361 	if (!conn)
2362 		return ERR_PTR(-ENOTCONN);
2363 
2364 	hlen = L2CAP_HDR_SIZE;
2365 
2366 	if (sdulen)
2367 		hlen += L2CAP_SDULEN_SIZE;
2368 
2369 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2370 
2371 	skb = chan->ops->alloc_skb(chan, count + hlen,
2372 				   msg->msg_flags & MSG_DONTWAIT);
2373 	if (IS_ERR(skb))
2374 		return skb;
2375 
2376 	/* Create L2CAP header */
2377 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2378 	lh->cid = cpu_to_le16(chan->dcid);
2379 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2380 
2381 	if (sdulen)
2382 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2383 
2384 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2385 	if (unlikely(err < 0)) {
2386 		kfree_skb(skb);
2387 		return ERR_PTR(err);
2388 	}
2389 
2390 	return skb;
2391 }
2392 
2393 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2394 				struct sk_buff_head *seg_queue,
2395 				struct msghdr *msg, size_t len)
2396 {
2397 	struct sk_buff *skb;
2398 	size_t pdu_len;
2399 	u16 sdu_len;
2400 
2401 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2402 
2403 	pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;
2404 
2405 	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2406 
2407 	sdu_len = len;
2408 	pdu_len -= L2CAP_SDULEN_SIZE;
2409 
2410 	while (len > 0) {
2411 		if (len <= pdu_len)
2412 			pdu_len = len;
2413 
2414 		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2415 		if (IS_ERR(skb)) {
2416 			__skb_queue_purge(seg_queue);
2417 			return PTR_ERR(skb);
2418 		}
2419 
2420 		__skb_queue_tail(seg_queue, skb);
2421 
2422 		len -= pdu_len;
2423 
2424 		if (sdu_len) {
2425 			sdu_len = 0;
2426 			pdu_len += L2CAP_SDULEN_SIZE;
2427 		}
2428 	}
2429 
2430 	return 0;
2431 }
2432 
2433 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2434 		    u32 priority)
2435 {
2436 	struct sk_buff *skb;
2437 	int err;
2438 	struct sk_buff_head seg_queue;
2439 
2440 	if (!chan->conn)
2441 		return -ENOTCONN;
2442 
2443 	/* Connectionless channel */
2444 	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2445 		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2446 		if (IS_ERR(skb))
2447 			return PTR_ERR(skb);
2448 
2449 		/* Channel lock is released before requesting new skb and then
2450 		 * reacquired thus we need to recheck channel state.
2451 		 */
2452 		if (chan->state != BT_CONNECTED) {
2453 			kfree_skb(skb);
2454 			return -ENOTCONN;
2455 		}
2456 
2457 		l2cap_do_send(chan, skb);
2458 		return len;
2459 	}
2460 
2461 	switch (chan->mode) {
2462 	case L2CAP_MODE_LE_FLOWCTL:
2463 		/* Check outgoing MTU */
2464 		if (len > chan->omtu)
2465 			return -EMSGSIZE;
2466 
2467 		if (!chan->tx_credits)
2468 			return -EAGAIN;
2469 
2470 		__skb_queue_head_init(&seg_queue);
2471 
2472 		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2473 
2474 		if (chan->state != BT_CONNECTED) {
2475 			__skb_queue_purge(&seg_queue);
2476 			err = -ENOTCONN;
2477 		}
2478 
2479 		if (err)
2480 			return err;
2481 
2482 		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2483 
2484 		while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2485 			l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2486 			chan->tx_credits--;
2487 		}
2488 
2489 		if (!chan->tx_credits)
2490 			chan->ops->suspend(chan);
2491 
2492 		err = len;
2493 
2494 		break;
2495 
2496 	case L2CAP_MODE_BASIC:
2497 		/* Check outgoing MTU */
2498 		if (len > chan->omtu)
2499 			return -EMSGSIZE;
2500 
2501 		/* Create a basic PDU */
2502 		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2503 		if (IS_ERR(skb))
2504 			return PTR_ERR(skb);
2505 
2506 		/* Channel lock is released before requesting new skb and then
2507 		 * reacquired thus we need to recheck channel state.
2508 		 */
2509 		if (chan->state != BT_CONNECTED) {
2510 			kfree_skb(skb);
2511 			return -ENOTCONN;
2512 		}
2513 
2514 		l2cap_do_send(chan, skb);
2515 		err = len;
2516 		break;
2517 
2518 	case L2CAP_MODE_ERTM:
2519 	case L2CAP_MODE_STREAMING:
2520 		/* Check outgoing MTU */
2521 		if (len > chan->omtu) {
2522 			err = -EMSGSIZE;
2523 			break;
2524 		}
2525 
2526 		__skb_queue_head_init(&seg_queue);
2527 
2528 		/* Do segmentation before calling in to the state machine,
2529 		 * since it's possible to block while waiting for memory
2530 		 * allocation.
2531 		 */
2532 		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2533 
2534 		/* The channel could have been closed while segmenting,
2535 		 * check that it is still connected.
2536 		 */
2537 		if (chan->state != BT_CONNECTED) {
2538 			__skb_queue_purge(&seg_queue);
2539 			err = -ENOTCONN;
2540 		}
2541 
2542 		if (err)
2543 			break;
2544 
2545 		if (chan->mode == L2CAP_MODE_ERTM)
2546 			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2547 		else
2548 			l2cap_streaming_send(chan, &seg_queue);
2549 
2550 		err = len;
2551 
2552 		/* If the skbs were not queued for sending, they'll still be in
2553 		 * seg_queue and need to be purged.
2554 		 */
2555 		__skb_queue_purge(&seg_queue);
2556 		break;
2557 
2558 	default:
2559 		BT_DBG("bad state %1.1x", chan->mode);
2560 		err = -EBADFD;
2561 	}
2562 
2563 	return err;
2564 }
2565 
2566 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2567 {
2568 	struct l2cap_ctrl control;
2569 	u16 seq;
2570 
2571 	BT_DBG("chan %p, txseq %u", chan, txseq);
2572 
2573 	memset(&control, 0, sizeof(control));
2574 	control.sframe = 1;
2575 	control.super = L2CAP_SUPER_SREJ;
2576 
2577 	for (seq = chan->expected_tx_seq; seq != txseq;
2578 	     seq = __next_seq(chan, seq)) {
2579 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2580 			control.reqseq = seq;
2581 			l2cap_send_sframe(chan, &control);
2582 			l2cap_seq_list_append(&chan->srej_list, seq);
2583 		}
2584 	}
2585 
2586 	chan->expected_tx_seq = __next_seq(chan, txseq);
2587 }
2588 
2589 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2590 {
2591 	struct l2cap_ctrl control;
2592 
2593 	BT_DBG("chan %p", chan);
2594 
2595 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2596 		return;
2597 
2598 	memset(&control, 0, sizeof(control));
2599 	control.sframe = 1;
2600 	control.super = L2CAP_SUPER_SREJ;
2601 	control.reqseq = chan->srej_list.tail;
2602 	l2cap_send_sframe(chan, &control);
2603 }
2604 
/* Re-send SREJ S-frames for every sequence number still on the SREJ
 * list, stopping at txseq.  Entries are popped, re-sent and appended
 * back, so the list's contents are preserved across the pass.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		/* Stop at txseq or when the list is exhausted */
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		/* Re-append so the entry remains pending */
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2630 
/* Process the ReqSeq (acknowledgement) field of a received frame:
 * free every transmitted-but-unacked frame with a sequence number
 * before reqseq, and stop the retransmission timer once nothing is
 * outstanding.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing in flight, or this ack duplicates the last one */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	/* Walk the acked range; frames may be absent from tx_q if they
	 * were already released, so each lookup can legitimately miss.
	 */
	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	/* Everything acked: no retransmission pending */
	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2662 
/* Abandon the SREJ_SENT receive state: drop all out-of-order frames
 * buffered so far, clear the pending SREJ requests, and fall back to
 * plain RECV expecting buffer_seq next.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2672 
/* ERTM transmit state machine, XMIT state: data may be transmitted
 * immediately and received acknowledgements are processed as they
 * arrive.  Poll events and retransmission timeouts move the channel
 * to WAIT_F until the remote answers with the F bit set.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* New segments: queue them and transmit right away */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* NOTE(review): l2cap_send_ack presumably emits RNR while
		 * CONN_LOCAL_BUSY is set - verify against its definition.
		 */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We told the remote we were busy; poll it with
			 * RR(P=1) and wait in WAIT_F for the F bit.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		/* Acknowledgement received: release acked frames */
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Poll the remote and await the F-bit response */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll before retransmitting */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2744 
2745 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2746 				  struct l2cap_ctrl *control,
2747 				  struct sk_buff_head *skbs, u8 event)
2748 {
2749 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2750 	       event);
2751 
2752 	switch (event) {
2753 	case L2CAP_EV_DATA_REQUEST:
2754 		if (chan->tx_send_head == NULL)
2755 			chan->tx_send_head = skb_peek(skbs);
2756 		/* Queue data, but don't send. */
2757 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2758 		break;
2759 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2760 		BT_DBG("Enter LOCAL_BUSY");
2761 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2762 
2763 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2764 			/* The SREJ_SENT state must be aborted if we are to
2765 			 * enter the LOCAL_BUSY state.
2766 			 */
2767 			l2cap_abort_rx_srej_sent(chan);
2768 		}
2769 
2770 		l2cap_send_ack(chan);
2771 
2772 		break;
2773 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2774 		BT_DBG("Exit LOCAL_BUSY");
2775 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2776 
2777 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2778 			struct l2cap_ctrl local_control;
2779 			memset(&local_control, 0, sizeof(local_control));
2780 			local_control.sframe = 1;
2781 			local_control.super = L2CAP_SUPER_RR;
2782 			local_control.poll = 1;
2783 			local_control.reqseq = chan->buffer_seq;
2784 			l2cap_send_sframe(chan, &local_control);
2785 
2786 			chan->retry_count = 1;
2787 			__set_monitor_timer(chan);
2788 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2789 		}
2790 		break;
2791 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2792 		l2cap_process_reqseq(chan, control->reqseq);
2793 
2794 		/* Fall through */
2795 
2796 	case L2CAP_EV_RECV_FBIT:
2797 		if (control && control->final) {
2798 			__clear_monitor_timer(chan);
2799 			if (chan->unacked_frames > 0)
2800 				__set_retrans_timer(chan);
2801 			chan->retry_count = 0;
2802 			chan->tx_state = L2CAP_TX_STATE_XMIT;
2803 			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2804 		}
2805 		break;
2806 	case L2CAP_EV_EXPLICIT_POLL:
2807 		/* Ignore */
2808 		break;
2809 	case L2CAP_EV_MONITOR_TO:
2810 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2811 			l2cap_send_rr_or_rnr(chan, 1);
2812 			__set_monitor_timer(chan);
2813 			chan->retry_count++;
2814 		} else {
2815 			l2cap_send_disconn_req(chan, ECONNABORTED);
2816 		}
2817 		break;
2818 	default:
2819 		break;
2820 	}
2821 }
2822 
2823 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2824 		     struct sk_buff_head *skbs, u8 event)
2825 {
2826 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2827 	       chan, control, skbs, event, chan->tx_state);
2828 
2829 	switch (chan->tx_state) {
2830 	case L2CAP_TX_STATE_XMIT:
2831 		l2cap_tx_state_xmit(chan, control, skbs, event);
2832 		break;
2833 	case L2CAP_TX_STATE_WAIT_F:
2834 		l2cap_tx_state_wait_f(chan, control, skbs, event);
2835 		break;
2836 	default:
2837 		/* Ignore event */
2838 		break;
2839 	}
2840 }
2841 
/* Feed the ReqSeq and F bit of a received frame into the tx state
 * machine so acknowledged frames get released.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2848 
/* Feed only the F bit of a received frame into the tx state machine
 * (no ReqSeq processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2855 
2856 /* Copy frame to all raw sockets on that connection */
2857 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2858 {
2859 	struct sk_buff *nskb;
2860 	struct l2cap_chan *chan;
2861 
2862 	BT_DBG("conn %p", conn);
2863 
2864 	mutex_lock(&conn->chan_lock);
2865 
2866 	list_for_each_entry(chan, &conn->chan_l, list) {
2867 		if (chan->chan_type != L2CAP_CHAN_RAW)
2868 			continue;
2869 
2870 		/* Don't send frame to the channel it came from */
2871 		if (bt_cb(skb)->chan == chan)
2872 			continue;
2873 
2874 		nskb = skb_clone(skb, GFP_KERNEL);
2875 		if (!nskb)
2876 			continue;
2877 		if (chan->ops->recv(chan, nskb))
2878 			kfree_skb(nskb);
2879 	}
2880 
2881 	mutex_unlock(&conn->chan_lock);
2882 }
2883 
2884 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill a signalling PDU: L2CAP header + command header +
 * dlen bytes of payload.  Payload beyond the connection MTU is carried
 * in continuation fragments chained on frag_list; those fragments have
 * no headers of their own.  Returns NULL on allocation failure or if
 * the MTU cannot even hold the headers.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	/* Total wire length; the first skb holds at most one MTU */
	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling CID differs between LE and BR/EDR links */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Payload room left in the first skb after both headers */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	/* Bytes still to be carried in continuation fragments */
	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head skb and all fragments chained so far */
	kfree_skb(skb);
	return NULL;
}
2950 
/* Parse one option from a configuration request/response buffer,
 * advancing *ptr past it.  Options of length 1, 2 or 4 are returned
 * by value in *val; any other length stores a pointer to the raw
 * option payload in *val instead.  Returns the total bytes consumed
 * (option header + payload).
 *
 * NOTE(review): opt->len is peer-controlled and is not validated
 * against the bytes actually remaining in the received frame here;
 * callers bound their loop only by the declared length.  Verify the
 * walk cannot be driven past the end of the buffer (later kernels
 * pass an explicit size to this helper).
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Unusual length: hand back a pointer to the raw data */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
2984 
/* Append one configuration option (type, length, value) at *ptr and
 * advance *ptr past it.  Lengths 1, 2 and 4 encode val directly in
 * little-endian form; any other length treats val as a pointer to
 * len bytes to copy.
 *
 * NOTE(review): no bound on the destination buffer is checked here;
 * callers must size their response buffers for the worst case (later
 * kernels pass an explicit remaining-space argument).
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3014 
3015 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
3016 {
3017 	struct l2cap_conf_efs efs;
3018 
3019 	switch (chan->mode) {
3020 	case L2CAP_MODE_ERTM:
3021 		efs.id		= chan->local_id;
3022 		efs.stype	= chan->local_stype;
3023 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3024 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3025 		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3026 		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3027 		break;
3028 
3029 	case L2CAP_MODE_STREAMING:
3030 		efs.id		= 1;
3031 		efs.stype	= L2CAP_SERV_BESTEFFORT;
3032 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3033 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3034 		efs.acc_lat	= 0;
3035 		efs.flush_to	= 0;
3036 		break;
3037 
3038 	default:
3039 		return;
3040 	}
3041 
3042 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3043 			   (unsigned long) &efs);
3044 }
3045 
3046 static void l2cap_ack_timeout(struct work_struct *work)
3047 {
3048 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3049 					       ack_timer.work);
3050 	u16 frames_to_ack;
3051 
3052 	BT_DBG("chan %p", chan);
3053 
3054 	l2cap_chan_lock(chan);
3055 
3056 	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3057 				     chan->last_acked_seq);
3058 
3059 	if (frames_to_ack)
3060 		l2cap_send_rr_or_rnr(chan, 0);
3061 
3062 	l2cap_chan_unlock(chan);
3063 	l2cap_chan_put(chan);
3064 }
3065 
/* Initialise per-channel ERTM/streaming state.  For all modes the
 * sequence counters, SDU reassembly state, tx queue and AMP move
 * state are reset; ERTM channels additionally get their timers, the
 * srej queue and the srej/retrans sequence lists.  Returns 0 on
 * success or a negative error from sequence-list allocation.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	/* Reset sequence numbers and reassembly state */
	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Start on the BR/EDR controller with no AMP move in progress */
	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Streaming/basic modes need none of the ERTM machinery below */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	/* Don't leak the srej list if the second allocation fails */
	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3110 
3111 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3112 {
3113 	switch (mode) {
3114 	case L2CAP_MODE_STREAMING:
3115 	case L2CAP_MODE_ERTM:
3116 		if (l2cap_mode_supported(mode, remote_feat_mask))
3117 			return mode;
3118 		/* fall through */
3119 	default:
3120 		return L2CAP_MODE_BASIC;
3121 	}
3122 }
3123 
3124 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3125 {
3126 	return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3127 }
3128 
3129 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3130 {
3131 	return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3132 }
3133 
3134 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3135 				      struct l2cap_conf_rfc *rfc)
3136 {
3137 	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3138 		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3139 
3140 		/* Class 1 devices have must have ERTM timeouts
3141 		 * exceeding the Link Supervision Timeout.  The
3142 		 * default Link Supervision Timeout for AMP
3143 		 * controllers is 10 seconds.
3144 		 *
3145 		 * Class 1 devices use 0xffffffff for their
3146 		 * best-effort flush timeout, so the clamping logic
3147 		 * will result in a timeout that meets the above
3148 		 * requirement.  ERTM timeouts are 16-bit values, so
3149 		 * the maximum timeout is 65.535 seconds.
3150 		 */
3151 
3152 		/* Convert timeout to milliseconds and round */
3153 		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3154 
3155 		/* This is the recommended formula for class 2 devices
3156 		 * that start ERTM timers when packets are sent to the
3157 		 * controller.
3158 		 */
3159 		ertm_to = 3 * ertm_to + 500;
3160 
3161 		if (ertm_to > 0xffff)
3162 			ertm_to = 0xffff;
3163 
3164 		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3165 		rfc->monitor_timeout = rfc->retrans_timeout;
3166 	} else {
3167 		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3168 		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3169 	}
3170 }
3171 
3172 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3173 {
3174 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3175 	    __l2cap_ews_supported(chan->conn)) {
3176 		/* use extended control field */
3177 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3178 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3179 	} else {
3180 		chan->tx_win = min_t(u16, chan->tx_win,
3181 				     L2CAP_DEFAULT_TX_WINDOW);
3182 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3183 	}
3184 	chan->ack_win = chan->tx_win;
3185 }
3186 
/* Build an outgoing Configure Request for the channel into data.
 * Selects the channel mode (possibly downgrading to basic if the
 * remote lacks ERTM/streaming support), then appends MTU, RFC, EFS,
 * EWS and FCS options as applicable.  Returns the number of bytes
 * written.
 *
 * NOTE(review): options are appended via l2cap_add_conf_opt without
 * an explicit bound on the output buffer - callers must provide a
 * buffer sized for the maximal request.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode is only (re)negotiated on the first request/response */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* State-2 devices keep their configured mode as-is */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only a non-default MTU needs to be sent explicitly */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* An RFC option is only needed if the remote knows about
		 * retransmission/streaming modes at all.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* MPS bounded by MTU minus worst-case L2CAP overhead */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		/* The RFC option field is 8-bit; larger windows go in EWS */
		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		/* Request FCS omission when allowed and desired */
		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		/* MPS bounded by MTU minus worst-case L2CAP overhead */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		/* Request FCS omission when allowed and desired */
		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3304 
3305 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3306 {
3307 	struct l2cap_conf_rsp *rsp = data;
3308 	void *ptr = rsp->data;
3309 	void *req = chan->conf_req;
3310 	int len = chan->conf_len;
3311 	int type, hint, olen;
3312 	unsigned long val;
3313 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3314 	struct l2cap_conf_efs efs;
3315 	u8 remote_efs = 0;
3316 	u16 mtu = L2CAP_DEFAULT_MTU;
3317 	u16 result = L2CAP_CONF_SUCCESS;
3318 	u16 size;
3319 
3320 	BT_DBG("chan %p", chan);
3321 
3322 	while (len >= L2CAP_CONF_OPT_SIZE) {
3323 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3324 
3325 		hint  = type & L2CAP_CONF_HINT;
3326 		type &= L2CAP_CONF_MASK;
3327 
3328 		switch (type) {
3329 		case L2CAP_CONF_MTU:
3330 			mtu = val;
3331 			break;
3332 
3333 		case L2CAP_CONF_FLUSH_TO:
3334 			chan->flush_to = val;
3335 			break;
3336 
3337 		case L2CAP_CONF_QOS:
3338 			break;
3339 
3340 		case L2CAP_CONF_RFC:
3341 			if (olen == sizeof(rfc))
3342 				memcpy(&rfc, (void *) val, olen);
3343 			break;
3344 
3345 		case L2CAP_CONF_FCS:
3346 			if (val == L2CAP_FCS_NONE)
3347 				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3348 			break;
3349 
3350 		case L2CAP_CONF_EFS:
3351 			remote_efs = 1;
3352 			if (olen == sizeof(efs))
3353 				memcpy(&efs, (void *) val, olen);
3354 			break;
3355 
3356 		case L2CAP_CONF_EWS:
3357 			if (!chan->conn->hs_enabled)
3358 				return -ECONNREFUSED;
3359 
3360 			set_bit(FLAG_EXT_CTRL, &chan->flags);
3361 			set_bit(CONF_EWS_RECV, &chan->conf_state);
3362 			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3363 			chan->remote_tx_win = val;
3364 			break;
3365 
3366 		default:
3367 			if (hint)
3368 				break;
3369 
3370 			result = L2CAP_CONF_UNKNOWN;
3371 			*((u8 *) ptr++) = type;
3372 			break;
3373 		}
3374 	}
3375 
3376 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
3377 		goto done;
3378 
3379 	switch (chan->mode) {
3380 	case L2CAP_MODE_STREAMING:
3381 	case L2CAP_MODE_ERTM:
3382 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3383 			chan->mode = l2cap_select_mode(rfc.mode,
3384 						       chan->conn->feat_mask);
3385 			break;
3386 		}
3387 
3388 		if (remote_efs) {
3389 			if (__l2cap_efs_supported(chan->conn))
3390 				set_bit(FLAG_EFS_ENABLE, &chan->flags);
3391 			else
3392 				return -ECONNREFUSED;
3393 		}
3394 
3395 		if (chan->mode != rfc.mode)
3396 			return -ECONNREFUSED;
3397 
3398 		break;
3399 	}
3400 
3401 done:
3402 	if (chan->mode != rfc.mode) {
3403 		result = L2CAP_CONF_UNACCEPT;
3404 		rfc.mode = chan->mode;
3405 
3406 		if (chan->num_conf_rsp == 1)
3407 			return -ECONNREFUSED;
3408 
3409 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3410 				   (unsigned long) &rfc);
3411 	}
3412 
3413 	if (result == L2CAP_CONF_SUCCESS) {
3414 		/* Configure output options and let the other side know
3415 		 * which ones we don't like. */
3416 
3417 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
3418 			result = L2CAP_CONF_UNACCEPT;
3419 		else {
3420 			chan->omtu = mtu;
3421 			set_bit(CONF_MTU_DONE, &chan->conf_state);
3422 		}
3423 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3424 
3425 		if (remote_efs) {
3426 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3427 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3428 			    efs.stype != chan->local_stype) {
3429 
3430 				result = L2CAP_CONF_UNACCEPT;
3431 
3432 				if (chan->num_conf_req >= 1)
3433 					return -ECONNREFUSED;
3434 
3435 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3436 						   sizeof(efs),
3437 						   (unsigned long) &efs);
3438 			} else {
3439 				/* Send PENDING Conf Rsp */
3440 				result = L2CAP_CONF_PENDING;
3441 				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3442 			}
3443 		}
3444 
3445 		switch (rfc.mode) {
3446 		case L2CAP_MODE_BASIC:
3447 			chan->fcs = L2CAP_FCS_NONE;
3448 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3449 			break;
3450 
3451 		case L2CAP_MODE_ERTM:
3452 			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3453 				chan->remote_tx_win = rfc.txwin_size;
3454 			else
3455 				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3456 
3457 			chan->remote_max_tx = rfc.max_transmit;
3458 
3459 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3460 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3461 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3462 			rfc.max_pdu_size = cpu_to_le16(size);
3463 			chan->remote_mps = size;
3464 
3465 			__l2cap_set_ertm_timeouts(chan, &rfc);
3466 
3467 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3468 
3469 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3470 					   sizeof(rfc), (unsigned long) &rfc);
3471 
3472 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3473 				chan->remote_id = efs.id;
3474 				chan->remote_stype = efs.stype;
3475 				chan->remote_msdu = le16_to_cpu(efs.msdu);
3476 				chan->remote_flush_to =
3477 					le32_to_cpu(efs.flush_to);
3478 				chan->remote_acc_lat =
3479 					le32_to_cpu(efs.acc_lat);
3480 				chan->remote_sdu_itime =
3481 					le32_to_cpu(efs.sdu_itime);
3482 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3483 						   sizeof(efs),
3484 						   (unsigned long) &efs);
3485 			}
3486 			break;
3487 
3488 		case L2CAP_MODE_STREAMING:
3489 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3490 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3491 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3492 			rfc.max_pdu_size = cpu_to_le16(size);
3493 			chan->remote_mps = size;
3494 
3495 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3496 
3497 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3498 					   (unsigned long) &rfc);
3499 
3500 			break;
3501 
3502 		default:
3503 			result = L2CAP_CONF_UNACCEPT;
3504 
3505 			memset(&rfc, 0, sizeof(rfc));
3506 			rfc.mode = chan->mode;
3507 		}
3508 
3509 		if (result == L2CAP_CONF_SUCCESS)
3510 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3511 	}
3512 	rsp->scid   = cpu_to_le16(chan->dcid);
3513 	rsp->result = cpu_to_le16(result);
3514 	rsp->flags  = cpu_to_le16(0);
3515 
3516 	return ptr - data;
3517 }
3518 
/* Parse the options from a peer's Configure Response and build the
 * follow-up Configure Request in @data.
 *
 * @chan:   channel being configured
 * @rsp:    option list from the peer's response
 * @len:    length of the option list in bytes
 * @data:   output buffer, laid out as a struct l2cap_conf_req
 * @result: in/out configuration result; may be updated to
 *          L2CAP_CONF_UNACCEPT when the peer's MTU is too small
 *
 * Returns the number of bytes written to @data, or -ECONNREFUSED when
 * the peer's RFC mode or EFS service type is incompatible.
 *
 * NOTE(review): options are appended through @ptr with no bound on the
 * output buffer; callers pass fixed-size stack buffers (64 bytes in
 * l2cap_config_rsp).  Verify that the maximum legal option set cannot
 * overrun @data.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			/* Clamp to the minimum MTU and flag the response
			 * as unacceptable if the peer offered less.
			 */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
					   2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* A state-2 device must not change mode mid-way */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EWS:
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);
			break;

		case L2CAP_CONF_EFS:
			if (olen == sizeof(efs))
				memcpy(&efs, (void *)val, olen);

			/* Service type must match ours unless one side
			 * carries no traffic.
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs);
			break;

		case L2CAP_CONF_FCS:
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* Basic mode is non-negotiable: refuse if the peer insists on
	 * anything else.
	 */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		/* Commit the negotiated ERTM/streaming parameters */
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0);

	return ptr - data;
}
3627 
3628 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3629 				u16 result, u16 flags)
3630 {
3631 	struct l2cap_conf_rsp *rsp = data;
3632 	void *ptr = rsp->data;
3633 
3634 	BT_DBG("chan %p", chan);
3635 
3636 	rsp->scid   = cpu_to_le16(chan->dcid);
3637 	rsp->result = cpu_to_le16(result);
3638 	rsp->flags  = cpu_to_le16(flags);
3639 
3640 	return ptr - data;
3641 }
3642 
3643 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3644 {
3645 	struct l2cap_le_conn_rsp rsp;
3646 	struct l2cap_conn *conn = chan->conn;
3647 
3648 	BT_DBG("chan %p", chan);
3649 
3650 	rsp.dcid    = cpu_to_le16(chan->scid);
3651 	rsp.mtu     = cpu_to_le16(chan->imtu);
3652 	rsp.mps     = cpu_to_le16(chan->mps);
3653 	rsp.credits = cpu_to_le16(chan->rx_credits);
3654 	rsp.result  = cpu_to_le16(L2CAP_CR_SUCCESS);
3655 
3656 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3657 		       &rsp);
3658 }
3659 
3660 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3661 {
3662 	struct l2cap_conn_rsp rsp;
3663 	struct l2cap_conn *conn = chan->conn;
3664 	u8 buf[128];
3665 	u8 rsp_code;
3666 
3667 	rsp.scid   = cpu_to_le16(chan->dcid);
3668 	rsp.dcid   = cpu_to_le16(chan->scid);
3669 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3670 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3671 
3672 	if (chan->hs_hcon)
3673 		rsp_code = L2CAP_CREATE_CHAN_RSP;
3674 	else
3675 		rsp_code = L2CAP_CONN_RSP;
3676 
3677 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3678 
3679 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3680 
3681 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3682 		return;
3683 
3684 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3685 		       l2cap_build_conf_req(chan, buf), buf);
3686 	chan->num_conf_req++;
3687 }
3688 
/* Extract the final RFC (and extended window) parameters from a
 * successful Configure Response and commit them to @chan.  Only
 * meaningful for ERTM and streaming mode channels.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	/* Basic/LE modes carry no RFC parameters; nothing to do */
	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* With extended control the window comes from the EWS
		 * option, otherwise from the RFC tx window field.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
	}
}
3739 
3740 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3741 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3742 				    u8 *data)
3743 {
3744 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3745 
3746 	if (cmd_len < sizeof(*rej))
3747 		return -EPROTO;
3748 
3749 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3750 		return 0;
3751 
3752 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3753 	    cmd->ident == conn->info_ident) {
3754 		cancel_delayed_work(&conn->info_timer);
3755 
3756 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3757 		conn->info_ident = 0;
3758 
3759 		l2cap_conn_start(conn);
3760 	}
3761 
3762 	return 0;
3763 }
3764 
/* Handle an incoming Connection Request (or AMP Create Channel
 * Request when called with an AMP @amp_id and rsp_code
 * L2CAP_CREATE_CHAN_RSP).
 *
 * Looks up a listening channel for the requested PSM, checks link
 * security, creates the child channel and sends a response carrying
 * the result/status.  Returns the newly created channel, or NULL if
 * the request was rejected before one was created.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* Lock order: conn->chan_lock before the channel lock */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm  = psm;
	/* The peer's source CID is our destination CID */
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	/* Decide success/pending based on the feature exchange state,
	 * channel security and the deferred-setup flag.
	 */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Kick off the feature-mask exchange the first time we defer a
	 * connection for no specific reason.
	 */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* On immediate success, start configuration right away */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
3898 
/* Handle an incoming Connection Request on a BR/EDR link: notify the
 * management interface the first time we learn the device connected,
 * then hand off to l2cap_connect() to create the channel and respond.
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	struct hci_conn *hcon = conn->hcon;

	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	hci_dev_lock(hdev);
	/* Only report the connection to mgmt once per ACL link */
	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
		mgmt_device_connected(hdev, &hcon->dst, hcon->type,
				      hcon->dst_type, 0, NULL, 0,
				      hcon->dev_class);
	hci_dev_unlock(hdev);

	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
	return 0;
}
3919 
/* Handle a Connection Response (or AMP Create Channel Response) to
 * one of our outgoing connection requests.  On success the channel
 * moves to BT_CONFIG and we send the initial Configure Request; on
 * anything other than success/pending the channel is torn down.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	/* A pending response may not carry our CID yet; fall back to
	 * matching by the signaling identifier we used.
	 */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the first Configure Request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result is a refusal */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3992 
3993 static inline void set_default_fcs(struct l2cap_chan *chan)
3994 {
3995 	/* FCS is enabled only in ERTM or streaming mode, if one or both
3996 	 * sides request it.
3997 	 */
3998 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3999 		chan->fcs = L2CAP_FCS_NONE;
4000 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4001 		chan->fcs = L2CAP_FCS_CRC16;
4002 }
4003 
/* Send a successful Configure Response (used to complete an EFS
 * PENDING exchange) and mark our configuration output as done.
 */
static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
				    u8 ident, u16 flags)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
	       flags);

	/* Local side is no longer pending once this response goes out */
	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
		       l2cap_build_conf_rsp(chan, data,
					    L2CAP_CONF_SUCCESS, flags), data);
}
4019 
4020 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4021 				   u16 scid, u16 dcid)
4022 {
4023 	struct l2cap_cmd_rej_cid rej;
4024 
4025 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4026 	rej.scid = __cpu_to_le16(scid);
4027 	rej.dcid = __cpu_to_le16(dcid);
4028 
4029 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4030 }
4031 
/* Handle an incoming Configure Request.  Fragments (continuation
 * flag set) are accumulated in chan->conf_req until the final
 * fragment arrives, then the full option list is parsed and a
 * Configure Response is sent.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* The peer's dcid is our scid; returns the channel locked */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only legal while connecting/configuring */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Our own Configure Request has not gone out yet: send it now */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4140 
/* Handle an incoming Configure Response to one of our Configure
 * Requests.  On UNACCEPT we retry with the peer's suggested values
 * (up to L2CAP_CONF_MAX_CONF_RSP times); anything else unexpected
 * tears the channel down.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns the channel locked */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* BR/EDR answers now; AMP waits for the logical
			 * link to come up first.
			 */
			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* Too many retries: fall through and disconnect */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	/* More option fragments are coming; wait for the last one */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: finish channel setup */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4252 
/* Handle an incoming Disconnection Request: acknowledge it with a
 * Disconnection Response and tear down the local channel.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's dcid names our local channel (our scid) */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	l2cap_chan_lock(chan);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	/* Hold an extra reference so the channel survives
	 * l2cap_chan_del() until ops->close() has run.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4299 
/* Handle a Disconnection Response to our own Disconnection Request:
 * the peer has acknowledged, so tear down the local channel.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Hold an extra reference so the channel survives
	 * l2cap_chan_del() until ops->close() has run.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4338 
/* Handle an incoming Information Request.  Supported types are the
 * feature mask and the fixed channel map; anything else is answered
 * with "not supported".
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise ERTM/streaming/FCS unless disabled, plus the
		 * extended flow/window features when high speed is on.
		 */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (conn->hs_enabled)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		/* NOTE(review): this updates the module-global
		 * l2cap_fixed_chan table from per-connection state;
		 * connections with different hs_enabled settings could
		 * race here — confirm serialization by the caller.
		 */
		if (conn->hs_enabled)
			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
		else
			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;

		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
4393 
/* Handle an Information Response to our outstanding Information
 * Request.  A feature-mask answer may chain into a fixed-channel
 * request; once the exchange completes, pending connections are
 * started via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	/* A failed response still completes the exchange */
	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		/* If the peer supports fixed channels, query the map
		 * before declaring the exchange finished.
		 */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->fixed_chan_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4456 
/* Handle an L2CAP Create Channel Request (AMP).  amp_id 0 (BR/EDR) is
 * treated as an ordinary Connect Request; any other id names an AMP
 * controller which must exist, be of type HCI_AMP and be up.  An unknown
 * or unusable controller id is answered with L2CAP_CR_BAD_AMP.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!conn->hs_enabled)
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		/* The AMP link to the peer must already exist. */
		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	/* Reject: requested AMP controller is unknown or not usable. */
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4533 
4534 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4535 {
4536 	struct l2cap_move_chan_req req;
4537 	u8 ident;
4538 
4539 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4540 
4541 	ident = l2cap_get_ident(chan->conn);
4542 	chan->ident = ident;
4543 
4544 	req.icid = cpu_to_le16(chan->scid);
4545 	req.dest_amp_id = dest_amp_id;
4546 
4547 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4548 		       &req);
4549 
4550 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4551 }
4552 
4553 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4554 {
4555 	struct l2cap_move_chan_rsp rsp;
4556 
4557 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4558 
4559 	rsp.icid = cpu_to_le16(chan->dcid);
4560 	rsp.result = cpu_to_le16(result);
4561 
4562 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4563 		       sizeof(rsp), &rsp);
4564 }
4565 
4566 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4567 {
4568 	struct l2cap_move_chan_cfm cfm;
4569 
4570 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4571 
4572 	chan->ident = l2cap_get_ident(chan->conn);
4573 
4574 	cfm.icid = cpu_to_le16(chan->scid);
4575 	cfm.result = cpu_to_le16(result);
4576 
4577 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4578 		       sizeof(cfm), &cfm);
4579 
4580 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4581 }
4582 
4583 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4584 {
4585 	struct l2cap_move_chan_cfm cfm;
4586 
4587 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4588 
4589 	cfm.icid = cpu_to_le16(icid);
4590 	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4591 
4592 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4593 		       sizeof(cfm), &cfm);
4594 }
4595 
4596 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4597 					 u16 icid)
4598 {
4599 	struct l2cap_move_chan_cfm_rsp rsp;
4600 
4601 	BT_DBG("icid 0x%4.4x", icid);
4602 
4603 	rsp.icid = cpu_to_le16(icid);
4604 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4605 }
4606 
4607 static void __release_logical_link(struct l2cap_chan *chan)
4608 {
4609 	chan->hs_hchan = NULL;
4610 	chan->hs_hcon = NULL;
4611 
4612 	/* Placeholder - release the logical link */
4613 }
4614 
/* Clean up channel state after AMP logical link setup failed, either
 * during channel creation (disconnect) or during a channel move
 * (abort the move and notify the peer).
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
4645 
/* Complete channel creation once the AMP logical link @hchan is up:
 * attach the link, acknowledge the pending EFS configuration and, if
 * configuration has finished in both directions, initialize ERTM and
 * bring the channel to the ready state.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			/* ERTM setup failed: tear the channel down. */
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
4668 
/* Advance the channel-move state machine after the AMP logical link
 * @hchan came up.  Depending on the current move state and role this
 * either waits for the peer, sends a confirm (initiator) or a success
 * response (responder); any unexpected state aborts the move.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			/* Defer until local busy condition clears. */
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4702 
/* Call with chan locked.
 *
 * Completion callback for AMP logical link setup: on failure clean up
 * the attempted create/move and drop the link references; on success
 * continue either the channel-create or the channel-move sequence,
 * chosen by the current channel state.
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id != AMP_ID_BREDR)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
4723 
4724 void l2cap_move_start(struct l2cap_chan *chan)
4725 {
4726 	BT_DBG("chan %p", chan);
4727 
4728 	if (chan->local_amp_id == AMP_ID_BREDR) {
4729 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4730 			return;
4731 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4732 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4733 		/* Placeholder - start physical link setup */
4734 	} else {
4735 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4736 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4737 		chan->move_id = 0;
4738 		l2cap_move_setup(chan);
4739 		l2cap_send_move_chan_req(chan, 0);
4740 	}
4741 }
4742 
/* Continue channel creation once the physical (AMP) link attempt has
 * finished with @result.  An outgoing channel (BT_CONNECT) proceeds
 * with an AMP Create Channel Request or falls back to a plain BR/EDR
 * Connect Request; an incoming channel is answered with a Create
 * Channel Response and, on success, moved into configuration.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Accepted: enter configuration and send our
			 * Configure Request right away.
			 */
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}
4794 
4795 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4796 				   u8 remote_amp_id)
4797 {
4798 	l2cap_move_setup(chan);
4799 	chan->move_id = local_amp_id;
4800 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
4801 
4802 	l2cap_send_move_chan_req(chan, remote_amp_id);
4803 }
4804 
/* Responder side: answer the peer's move request once physical link
 * setup has finished.  The logical link lookup is still a placeholder,
 * so hchan is currently always NULL and only the "not allowed" reply
 * path can be taken.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
4829 
4830 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4831 {
4832 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4833 		u8 rsp_result;
4834 		if (result == -EINVAL)
4835 			rsp_result = L2CAP_MR_BAD_ID;
4836 		else
4837 			rsp_result = L2CAP_MR_NOT_ALLOWED;
4838 
4839 		l2cap_send_move_chan_rsp(chan, rsp_result);
4840 	}
4841 
4842 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
4843 	chan->move_state = L2CAP_MOVE_STABLE;
4844 
4845 	/* Restart data transmission */
4846 	l2cap_ertm_send(chan);
4847 }
4848 
/* Invoke with locked chan.
 *
 * Completion of physical (AMP) link setup: dispatch to channel create,
 * move-initiate, move-respond or move-cancel handling based on the
 * channel state, @result and the current move role.
 *
 * NOTE(review): the BT_DISCONN/BT_CLOSED path unlocks the channel
 * before returning, while every other path leaves it locked - confirm
 * callers expect this asymmetry.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
4882 
/* Handle an incoming Move Channel Request.  Validates that the channel
 * is movable (dynamic CID, ERTM or streaming mode, policy allows AMP),
 * that the destination controller exists, and resolves move collisions
 * by bd_addr comparison before becoming the move responder.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!conn->hs_enabled)
		return -EINVAL;

	/* Channel is returned locked; released at the end of this
	 * function via l2cap_chan_unlock().
	 */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	if (req->dest_amp_id != AMP_ID_BREDR) {
		/* Destination must be a known, powered-up AMP controller. */
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision.  Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
4980 
/* Advance an in-progress move after a success or pending Move Channel
 * Response for @icid.  Drives the initiator-side move state machine;
 * any unexpected state aborts the move with an unconfirmed confirm.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	/* A pending response restarts the (extended) guard timer. */
	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
5070 
/* Handle a failed Move Channel Response.  On a collision the local
 * side switches to the responder role; otherwise the move is cancelled.
 * In all cases an unconfirmed Move Channel Confirm is sent.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			/* The peer's move request wins; act as responder. */
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
5099 
5100 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5101 				  struct l2cap_cmd_hdr *cmd,
5102 				  u16 cmd_len, void *data)
5103 {
5104 	struct l2cap_move_chan_rsp *rsp = data;
5105 	u16 icid, result;
5106 
5107 	if (cmd_len != sizeof(*rsp))
5108 		return -EPROTO;
5109 
5110 	icid = le16_to_cpu(rsp->icid);
5111 	result = le16_to_cpu(rsp->result);
5112 
5113 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5114 
5115 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5116 		l2cap_move_continue(conn, icid, result);
5117 	else
5118 		l2cap_move_fail(conn, cmd->ident, icid, result);
5119 
5120 	return 0;
5121 }
5122 
/* Handle a Move Channel Confirm.  Finalizes the responder-side move
 * (committing or reverting the controller id) and always answers with
 * a Move Channel Confirm Response, even for an unknown icid.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			/* Commit to the new controller; drop the logical
			 * link when we ended up back on BR/EDR.
			 */
			chan->local_amp_id = chan->move_id;
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			/* Unconfirmed: revert to the current controller. */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
5164 
/* Handle a Move Channel Confirm Response, which completes the
 * initiator side of a channel move: commit the new controller id and
 * release the logical link if the channel is back on BR/EDR.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
5199 
5200 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5201 					 u16 to_multiplier)
5202 {
5203 	u16 max_latency;
5204 
5205 	if (min > max || min < 6 || max > 3200)
5206 		return -EINVAL;
5207 
5208 	if (to_multiplier < 10 || to_multiplier > 3200)
5209 		return -EINVAL;
5210 
5211 	if (max >= to_multiplier * 8)
5212 		return -EINVAL;
5213 
5214 	max_latency = (to_multiplier * 8 / max) - 1;
5215 	if (latency > 499 || latency > max_latency)
5216 		return -EINVAL;
5217 
5218 	return 0;
5219 }
5220 
/* Handle an LE Connection Parameter Update Request.  Only valid when
 * the local device is master of the link; the requested parameters are
 * validated, the accept/reject response is sent, and on acceptance the
 * controller is asked to apply the new parameters.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min		= __le16_to_cpu(req->min);
	max		= __le16_to_cpu(req->max);
	latency		= __le16_to_cpu(req->latency);
	to_multiplier	= __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	/* Only push the update to the controller after accepting. */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
5262 
/* Handle an LE Credit Based Connection Response.  On success the
 * channel learns its destination CID, remote MTU/MPS and initial TX
 * credits and becomes ready; any other result deletes the channel.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid    = __le16_to_cpu(rsp->dcid);
	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	/* 23 is the minimum MTU/MPS allowed for LE flow control. */
	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	/* Match the response to our pending request by identifier. */
	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5321 
/* Dispatch one BR/EDR signaling command to its handler.  Returns a
 * negative error for malformed or unrecognized commands; Echo Requests
 * are answered inline and Echo Responses are ignored.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the received payload straight back. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5401 
/* Handle an LE Credit Based Connection Request.  Looks up a listening
 * channel for the PSM, checks security and duplicate CIDs, creates the
 * new channel and answers with an LE Connect Response (deferred when
 * FLAG_DEFER_SETUP is set, in which case no response is sent yet).
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);
	psm  = req->psm;
	dcid = 0;
	credits = 0;

	/* 23 is the minimum MTU/MPS allowed for LE flow control. */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
		result = L2CAP_CR_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_NO_MEM;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_NO_MEM;
		goto response_unlock;
	}

	l2cap_le_flowctl_init(chan);

	/* Populate the new channel from the request and the link. */
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;
	chan->tx_credits = __le16_to_cpu(req->credits);

	__l2cap_chan_add(conn, chan);
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		/* Let the owner accept/reject; response comes later. */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);

	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5513 
/* Handle an LE Flow Control Credit packet: add the peer's credits to
 * the channel, guard against exceeding LE_FLOWCTL_MAX_CREDITS, and
 * flush any queued outgoing SDU fragments the new credits allow.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid	= __le16_to_cpu(pkt->cid);
	credits	= __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	/* Channel is returned locked; released at the end below. */
	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		/* Peer violated the credit limit; drop the channel. */
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		return 0;
	}

	chan->tx_credits += credits;

	/* Send as many queued frames as the new credits permit. */
	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
		chan->tx_credits--;
	}

	if (chan->tx_credits)
		chan->ops->resume(chan);

	l2cap_chan_unlock(chan);

	return 0;
}
5560 
/* Handle an incoming LE signaling Command Reject.
 *
 * A reject means the request we sent with this ident (e.g. an LE
 * connect request) was not understood, so any channel still waiting on
 * that ident is torn down with ECONNREFUSED.  Returns -EPROTO for a
 * truncated packet, otherwise 0 — a reject must never cause us to send
 * another reject.
 */
static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
	struct l2cap_chan *chan;

	/* Payload must hold at least the fixed reject header */
	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	mutex_lock(&conn->chan_lock);

	/* Find the channel whose pending request used this ident */
	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		goto done;

	l2cap_chan_lock(chan);
	l2cap_chan_del(chan, ECONNREFUSED);
	l2cap_chan_unlock(chan);

done:
	mutex_unlock(&conn->chan_lock);
	return 0;
}
5585 
5586 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5587 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5588 				   u8 *data)
5589 {
5590 	int err = 0;
5591 
5592 	switch (cmd->code) {
5593 	case L2CAP_COMMAND_REJ:
5594 		l2cap_le_command_rej(conn, cmd, cmd_len, data);
5595 		break;
5596 
5597 	case L2CAP_CONN_PARAM_UPDATE_REQ:
5598 		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5599 		break;
5600 
5601 	case L2CAP_CONN_PARAM_UPDATE_RSP:
5602 		break;
5603 
5604 	case L2CAP_LE_CONN_RSP:
5605 		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5606 		break;
5607 
5608 	case L2CAP_LE_CONN_REQ:
5609 		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5610 		break;
5611 
5612 	case L2CAP_LE_CREDITS:
5613 		err = l2cap_le_credits(conn, cmd, cmd_len, data);
5614 		break;
5615 
5616 	case L2CAP_DISCONN_REQ:
5617 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5618 		break;
5619 
5620 	case L2CAP_DISCONN_RSP:
5621 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5622 		break;
5623 
5624 	default:
5625 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5626 		err = -EINVAL;
5627 		break;
5628 	}
5629 
5630 	return err;
5631 }
5632 
/* Process one PDU from the LE signaling channel.
 *
 * Unlike BR/EDR signaling, an LE signaling PDU carries exactly one
 * command, so the command length must match the remaining payload
 * exactly.  Malformed or wrong-link PDUs are dropped silently; a
 * handler error produces a Command Reject.  Consumes the skb in all
 * cases.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	/* LE signaling is only valid on an LE link */
	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* Exactly one command per PDU, and ident 0 is reserved */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		/* NOTE(review): message text is misleading — this fires
		 * on any handler error, not only a wrong link type.
		 */
		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5673 
/* Process a PDU from the BR/EDR signaling channel.
 *
 * A BR/EDR signaling PDU may pack several commands back to back, so
 * iterate until the buffer is exhausted or a command is truncated.
 * Each failing handler gets its own Command Reject.  The raw PDU is
 * first passed to l2cap_raw_recv().  Consumes the skb in all cases.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	/* BR/EDR signaling is only valid on an ACL link */
	if (hcon->type != ACL_LINK)
		goto drop;

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);

		/* Command body must fit in the remaining bytes, and
		 * ident 0 is reserved; either failure means corruption.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			/* NOTE(review): message text is misleading — this
			 * fires on any handler error.
			 */
			BT_ERR("Wrong link type (%d)", err);

			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		/* Advance to the next packed command */
		data += cmd_len;
		len  -= cmd_len;
	}

drop:
	kfree_skb(skb);
}
5722 
5723 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
5724 {
5725 	u16 our_fcs, rcv_fcs;
5726 	int hdr_size;
5727 
5728 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5729 		hdr_size = L2CAP_EXT_HDR_SIZE;
5730 	else
5731 		hdr_size = L2CAP_ENH_HDR_SIZE;
5732 
5733 	if (chan->fcs == L2CAP_FCS_CRC16) {
5734 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5735 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5736 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5737 
5738 		if (our_fcs != rcv_fcs)
5739 			return -EBADMSG;
5740 	}
5741 	return 0;
5742 }
5743 
/* Answer a poll (P=1) from the remote with an F-bit response.
 *
 * Sends RNR immediately if we are locally busy, restarts the
 * retransmission timer if the remote just left its busy state, then
 * transmits pending I-frames (which may carry the F-bit).  If nothing
 * sent so far carried the F-bit, a final RR is sent so the poll is
 * always answered.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5777 
/* Append a received fragment to the SDU being reassembled.
 *
 * Fragments are chained on the head skb's frag_list; *last_frag caches
 * the list tail so each append is O(1).  The head's byte counters are
 * updated to account for the new fragment.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	/* NOTE(review): on the first append *last_frag is the head skb
	 * itself, so this store also writes skb->next.  The head is held
	 * privately (chan->sdu) during reassembly, so this appears
	 * harmless — confirm before relying on skb->next elsewhere.
	 */
	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5796 
/* Feed one I-frame payload into SDU reassembly.
 *
 * control->sar selects this frame's segmentation role.  On success the
 * skb (or the completed SDU) is handed to the channel's recv callback,
 * i.e. ownership passes to this function.  Errors (defaulting to
 * -EINVAL for SAR sequence violations) free both the frame and any
 * partial SDU.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A reassembly must not already be in progress */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* First two payload bytes carry the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment must not already hold the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* Ownership moved to chan->sdu; don't free below */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		/* Continuation requires a reassembly in progress */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation must not complete or overflow the SDU */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The end fragment must make the length match exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop the frame and any partial SDU on any failure */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5878 
/* Re-segment queued frames after a channel move changed the MTU.
 *
 * Placeholder: not implemented yet, so it always reports success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	return 0;
}
5884 
5885 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5886 {
5887 	u8 event;
5888 
5889 	if (chan->mode != L2CAP_MODE_ERTM)
5890 		return;
5891 
5892 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5893 	l2cap_tx(chan, NULL, NULL, event);
5894 }
5895 
/* Deliver buffered I-frames that have become sequential.
 *
 * After SREJ recovery fills a gap, frames stored in srej_q are fed to
 * reassembly in buffer_seq order until the next gap (or local busy).
 * Once srej_q drains completely the channel returns to the RECV state
 * and acknowledges the remote.  Returns the first reassembly error, if
 * any.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5929 
/* Handle a received SREJ S-frame (selective retransmit request).
 *
 * Validates reqseq (it must name a sent-but-unacked frame and respect
 * the max_tx retry limit — violations disconnect), then retransmits
 * the single requested I-frame.  P/F-bit handling follows the ERTM
 * rules: a poll also releases new transmissions, and CONN_SREJ_ACT
 * plus srej_save_reqseq guard against acting twice on the same SREJ
 * exchange when the final response arrives.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx of 0 means unlimited retries */
	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this SREJ was already
			 * acted on when the poll was answered.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5987 
/* Handle a received REJ S-frame (go-back-N retransmit request).
 *
 * All unacked frames starting at reqseq are retransmitted, subject to
 * reqseq validation and the max_tx retry limit (violations
 * disconnect).  CONN_REJ_ACT prevents retransmitting a second time
 * when the matching F-bit response later arrives.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx of 0 means unlimited retries */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->control.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Skip the retransmit if this REJ was already acted on */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
6024 
/* Classify a received I-frame's txseq against the expected sequence
 * number and the tx window.
 *
 * Returns one of the L2CAP_TXSEQ_* classifications that drive the RX
 * state machines: expected, duplicate, unexpected (a gap, triggering
 * SREJ recovery), the SREJ-specific variants while in SREJ_SENT state,
 * or invalid — where "invalid" is either safely ignorable or fatal
 * depending on the tx window size (see the "double poll" note below).
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq earlier than expected_tx_seq means a retransmission */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6110 
/* ERTM RX state machine handler for the default RECV state.
 *
 * Expected I-frames are reassembled and acknowledged; a sequence gap
 * queues the frame and starts SREJ recovery (transition to SREJ_SENT).
 * S-frame events update the TX side: acknowledgments, retransmission
 * requests, and remote-busy tracking.  The skb is consumed — any frame
 * not stored on a queue is freed before returning.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Expected-but-busy frames are dropped; they will
			 * look like a gap once local busy clears.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Duplicate payload is discarded, but its reqseq
			 * still acknowledges our frames.
			 */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6244 
/* ERTM RX state machine handler while SREJ recovery is in progress.
 *
 * In-window I-frames are buffered in srej_q; a frame answering the
 * head of the outstanding SREJ list additionally triggers delivery of
 * any now-sequential buffered frames.  New gaps generate further
 * SREJs.  S-frame events mirror the RECV-state handling while keeping
 * the recovery going.  The skb is consumed — any frame not stored on a
 * queue is freed before returning.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* Head of the SREJ list answered; try to deliver
			 * everything that is now sequential.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll by repeating the tail SREJ */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6387 
6388 static int l2cap_finish_move(struct l2cap_chan *chan)
6389 {
6390 	BT_DBG("chan %p", chan);
6391 
6392 	chan->rx_state = L2CAP_RX_STATE_RECV;
6393 
6394 	if (chan->hs_hcon)
6395 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6396 	else
6397 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6398 
6399 	return l2cap_resegment(chan);
6400 }
6401 
/* RX handler while waiting for a poll (P=1) after a channel move.
 *
 * Only a poll frame is acceptable here (-EPROTO otherwise).  The TX
 * side is rewound to the receiver's reqseq, the move is finalized (MTU
 * switch plus resegmentation), and the poll is answered with the
 * F-bit.  Remaining non-I-frame events are replayed through the
 * RECV-state handler; an I-frame at this point is a protocol error.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6439 
/* RX handler while waiting for a final (F=1) response after a move.
 *
 * Only a final frame is acceptable (-EPROTO otherwise).  The TX side
 * is rewound to the receiver's reqseq, the MTU is switched to the link
 * now carrying the channel, and after resegmentation the frame is
 * replayed through the RECV-state handler.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* Use the AMP block MTU when on a high-speed link */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6477 
6478 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6479 {
6480 	/* Make sure reqseq is for a packet that has been sent but not acked */
6481 	u16 unacked;
6482 
6483 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6484 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6485 }
6486 
6487 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6488 		    struct sk_buff *skb, u8 event)
6489 {
6490 	int err = 0;
6491 
6492 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6493 	       control, skb, event, chan->rx_state);
6494 
6495 	if (__valid_reqseq(chan, control->reqseq)) {
6496 		switch (chan->rx_state) {
6497 		case L2CAP_RX_STATE_RECV:
6498 			err = l2cap_rx_state_recv(chan, control, skb, event);
6499 			break;
6500 		case L2CAP_RX_STATE_SREJ_SENT:
6501 			err = l2cap_rx_state_srej_sent(chan, control, skb,
6502 						       event);
6503 			break;
6504 		case L2CAP_RX_STATE_WAIT_P:
6505 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
6506 			break;
6507 		case L2CAP_RX_STATE_WAIT_F:
6508 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
6509 			break;
6510 		default:
6511 			/* shut it down */
6512 			break;
6513 		}
6514 	} else {
6515 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6516 		       control->reqseq, chan->next_tx_seq,
6517 		       chan->expected_ack_seq);
6518 		l2cap_send_disconn_req(chan, ECONNRESET);
6519 	}
6520 
6521 	return err;
6522 }
6523 
/* Receive an I-frame in streaming mode (no retransmission).
 *
 * Only the exactly-expected txseq is delivered; anything else discards
 * both the frame and any partial SDU, since streaming mode cannot
 * recover missing frames.  Sequence state always advances to just past
 * the received txseq.  Always returns 0.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* A gap invalidates any partially reassembled SDU */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return err;
}
6561 
/* Entry point for a data PDU on an ERTM or streaming channel.
 *
 * Unpacks the control field, verifies the FCS, and validates the
 * payload length against the channel MPS.  I-frames are routed through
 * l2cap_rx() (ERTM) or l2cap_stream_rx() (streaming); S-frames are
 * mapped to RX events after checking that the F/P bits are legal for
 * the current TX state.  Corrupt or ill-formed frames are dropped;
 * protocol violations disconnect the channel.  Consumes the skb and
 * always returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->control;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Account for the SDU-length header of a start fragment */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Maps L2CAP_SUPER_* values to their RX events */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* An S-frame carries no payload */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6649 
/* Return receive credits to the remote on an LE flow-control channel.
 *
 * To limit signaling traffic, credits are only returned once our
 * remaining rx_credits drop below half of le_max_credits (defined
 * elsewhere in this file); the grant then tops the remote back up to
 * le_max_credits.
 */
static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_credits pkt;
	u16 return_credits;

	/* We return more credits to the sender only after the amount of
	 * credits falls below half of the initial amount.
	 */
	if (chan->rx_credits >= (le_max_credits + 1) / 2)
		return;

	return_credits = le_max_credits - chan->rx_credits;

	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);

	chan->rx_credits += return_credits;

	pkt.cid     = cpu_to_le16(chan->scid);
	pkt.credits = cpu_to_le16(return_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
}
6675 
/* Receive one LE credit based flow control mode PDU and perform SDU
 * reassembly.  Each PDU consumes one receive credit; fresh credits are
 * returned via l2cap_chan_le_send_credits() when the pool runs low.
 * The first PDU of an SDU starts with a 16-bit total SDU length field.
 *
 * Returns a negative errno only when the caller still owns (and must
 * free) @skb; once ownership has been taken here, 0 is returned even
 * on internal errors (see the comment at the bottom).
 */
static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	/* A PDU with no credits outstanding is a protocol violation */
	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;	/* caller frees the skb */
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;	/* caller frees the skb */
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	/* Return credits to the sender if our pool has run low */
	l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First PDU of an SDU: begins with the total SDU length */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Unsegmented SDU: deliver directly, no reassembly needed */
		if (skb->len == sdu_len)
			return chan->ops->recv(chan, skb);

		/* Segmented SDU: stash the first fragment and wait */
		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		return 0;
	}

	/* Continuation PDU of an in-progress SDU */
	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;	/* now owned by chan->sdu */

	if (chan->sdu->len == chan->sdu_len) {
		/* SDU complete: hand it up and, on success, drop our refs */
		err = chan->ops->recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	/* On error drop the current fragment (if still ours) and any
	 * partially reassembled SDU.
	 */
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6765 
/* Route one received PDU to the dynamic channel identified by @cid.
 * Consumes @skb (either delivered to the channel or freed here).
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	/* On success the channel is returned locked; the matching
	 * unlock happens at the done label below.
	 */
	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			/* Incoming A2MP data may implicitly create the
			 * A2MP channel on this connection.
			 */
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Negative return means we still own the skb */
		if (l2cap_le_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		/* recv() returning 0 means it consumed the skb */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* ERTM/streaming receive path always takes the skb */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
6832 
/* Deliver a connectionless (UCD) PDU addressed by @psm to a matching
 * bound/connected channel.  Consumes @skb.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	/* Connectionless traffic is only defined for BR/EDR links */
	if (hcon->type != ACL_LINK)
		goto drop;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto drop;

	/* NOTE(review): if l2cap_global_chan_by_psm() returns the
	 * channel with a reference held, an l2cap_chan_put() is missing
	 * on every exit path below -- confirm against the lookup helper.
	 */

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
	bt_cb(skb)->psm = psm;

	/* recv() returning 0 means the skb was consumed */
	if (!chan->ops->recv(chan, skb))
		return;

drop:
	kfree_skb(skb);
}
6865 
/* Deliver an incoming PDU on the LE ATT fixed channel to a matching
 * connected channel.  Consumes @skb.
 */
static void l2cap_att_channel(struct l2cap_conn *conn,
			      struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	/* ATT traffic is only valid over LE links */
	if (hcon->type != LE_LINK)
		goto drop;

	chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
					 &hcon->src, &hcon->dst);
	if (!chan)
		goto drop;

	/* NOTE(review): if l2cap_global_chan_by_scid() returns the
	 * channel with a reference held, an l2cap_chan_put() is missing
	 * on every exit path below -- confirm against the lookup helper.
	 */

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* Silently ignore blacklisted remote devices */
	if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* recv() returning 0 means the skb was consumed */
	if (!chan->ops->recv(chan, skb))
		return;

drop:
	kfree_skb(skb);
}
6894 
/* Dispatch one complete L2CAP frame (basic header + payload) to the
 * appropriate per-CID handler.  Consumes @skb.  Frames arriving before
 * the HCI link is fully established are queued on conn->pending_rx and
 * replayed later by process_pending_rx().
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	/* skb_pull() only advances skb->data; lh still points at the
	 * header bytes, so reading it afterwards is fine.
	 */
	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* The header length must match the remaining payload exactly */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless payload starts with the destination PSM */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_ATT:
		l2cap_att_channel(conn, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	case L2CAP_CID_SMP:
		/* SMP failures tear down the whole connection */
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	case L2CAP_FC_6LOWPAN:
		bt_6lowpan_recv(conn, skb);
		break;

	default:
		/* Dynamically allocated CIDs */
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6952 
6953 static void process_pending_rx(struct work_struct *work)
6954 {
6955 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6956 					       pending_rx_work);
6957 	struct sk_buff *skb;
6958 
6959 	BT_DBG("");
6960 
6961 	while ((skb = skb_dequeue(&conn->pending_rx)))
6962 		l2cap_recv_frame(conn, skb);
6963 }
6964 
/* Find or create the L2CAP connection state for @hcon.  Returns the
 * already-attached conn when one exists, a freshly initialized one
 * otherwise, or NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	hci_conn_get(conn->hcon);	/* conn holds a ref on the hci_conn */
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* Pick the link-type specific controller MTU; an LE MTU of 0
	 * means the controller shares its ACL buffers with LE.
	 */
	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	/* High speed (AMP) is only applicable to BR/EDR links */
	if (hcon->type == ACL_LINK)
		conn->hs_enabled = test_bit(HCI_HS_ENABLED,
					    &hcon->hdev->dev_flags);

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	/* LE links use the SMP security timeout, BR/EDR links the
	 * information request timeout.
	 */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
7027 
7028 static bool is_valid_psm(u16 psm, u8 dst_type) {
7029 	if (!psm)
7030 		return false;
7031 
7032 	if (bdaddr_type_is_le(dst_type))
7033 		return (psm <= 0x00ff);
7034 
7035 	/* PSM must be odd and lsb of upper byte must be 0 */
7036 	return ((psm & 0x0101) == 0x0001);
7037 }
7038 
/* Initiate an outgoing L2CAP connection from @chan to @dst.
 *
 * Connection oriented channels are addressed by @psm, fixed channels
 * by @cid; @dst_type (BDADDR_*) also selects between a BR/EDR and an
 * LE link.  Returns 0 when the connection is established or already
 * in progress, a negative errno otherwise.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));

	/* Pick the local adapter that routes to @dst (takes a dev ref) */
	hdev = hci_get_route(dst, &chan->src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* RAW channels are exempt from the psm/cid requirement */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	/* Check that the requested mode is usable for an outgoing
	 * connection; ERTM/streaming may be disabled via module param.
	 */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		l2cap_le_flowctl_init(chan);
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
				      auth_type);
	} else {
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* Refuse a second fixed channel towards the same remote CID */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_type(hcon, hcon->src_type);

	/* Drop the chan lock across l2cap_chan_add() to respect the
	 * conn->chan_lock ordering.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed/raw channels are ready as soon as the
			 * link security level is sufficient.
			 */
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
7193 
7194 /* ---- L2CAP interface with lower layer (HCI) ---- */
7195 
7196 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7197 {
7198 	int exact = 0, lm1 = 0, lm2 = 0;
7199 	struct l2cap_chan *c;
7200 
7201 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7202 
7203 	/* Find listening sockets and check their link_mode */
7204 	read_lock(&chan_list_lock);
7205 	list_for_each_entry(c, &chan_list, global_l) {
7206 		if (c->state != BT_LISTEN)
7207 			continue;
7208 
7209 		if (!bacmp(&c->src, &hdev->bdaddr)) {
7210 			lm1 |= HCI_LM_ACCEPT;
7211 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7212 				lm1 |= HCI_LM_MASTER;
7213 			exact++;
7214 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
7215 			lm2 |= HCI_LM_ACCEPT;
7216 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7217 				lm2 |= HCI_LM_MASTER;
7218 		}
7219 	}
7220 	read_unlock(&chan_list_lock);
7221 
7222 	return exact ? lm1 : lm2;
7223 }
7224 
7225 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7226 {
7227 	struct l2cap_conn *conn;
7228 
7229 	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7230 
7231 	if (!status) {
7232 		conn = l2cap_conn_add(hcon);
7233 		if (conn)
7234 			l2cap_conn_ready(conn);
7235 	} else {
7236 		l2cap_conn_del(hcon, bt_to_errno(status));
7237 	}
7238 }
7239 
7240 int l2cap_disconn_ind(struct hci_conn *hcon)
7241 {
7242 	struct l2cap_conn *conn = hcon->l2cap_data;
7243 
7244 	BT_DBG("hcon %p", hcon);
7245 
7246 	if (!conn)
7247 		return HCI_ERROR_REMOTE_USER_TERM;
7248 	return conn->disc_reason;
7249 }
7250 
/* HCI callback: the link carrying this L2CAP connection has been
 * disconnected with HCI @reason.
 */
void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	/* Tear down 6LoWPAN state riding on this connection first */
	bt_6lowpan_del_conn(hcon->l2cap_data);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
7259 
7260 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7261 {
7262 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7263 		return;
7264 
7265 	if (encrypt == 0x00) {
7266 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
7267 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7268 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
7269 			   chan->sec_level == BT_SECURITY_FIPS)
7270 			l2cap_chan_close(chan, ECONNREFUSED);
7271 	} else {
7272 		if (chan->sec_level == BT_SECURITY_MEDIUM)
7273 			__clear_chan_timer(chan);
7274 	}
7275 }
7276 
/* HCI callback: an authentication/encryption change for @hcon finished
 * with @status; @encrypt is the new encryption state (0x00 = off).
 * Walk every channel on the connection and advance or abort its
 * connection setup state machine accordingly.
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	if (hcon->type == LE_LINK) {
		/* Encryption up on LE means SMP can distribute keys now */
		if (!status && encrypt)
			smp_distribute_keys(conn);
		cancel_delayed_work(&conn->security_timer);
	}

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP channels are unaffected by link security */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* The ATT fixed channel becomes ready as soon as
		 * encryption is established.
		 */
		if (chan->scid == L2CAP_CID_ATT) {
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(chan);
			}

			l2cap_chan_unlock(chan);
			continue;
		}

		/* Skip channels with a connect request still pending */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Security finished while our connect request was
			 * waiting: send it now, or give up on failure.
			 */
			if (!status)
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2) {
			/* Incoming connection was waiting on security:
			 * answer the remote's connect request.
			 */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On success immediately kick off configuration */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return 0;
}
7381 
/* HCI entry point for incoming ACL data on @hcon.  Reassembles L2CAP
 * frames that were fragmented across several ACL packets (according to
 * the packet boundary @flags) and hands every complete frame to
 * l2cap_recv_frame().  Always consumes @skb; always returns 0.
 */
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start packet while reassembly is in progress means
		 * the previous frame was truncated: discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		/* rx_len tracks how many bytes are still outstanding */
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
	return 0;
}
7486 
7487 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7488 {
7489 	struct l2cap_chan *c;
7490 
7491 	read_lock(&chan_list_lock);
7492 
7493 	list_for_each_entry(c, &chan_list, global_l) {
7494 		seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7495 			   &c->src, &c->dst,
7496 			   c->state, __le16_to_cpu(c->psm),
7497 			   c->scid, c->dcid, c->imtu, c->omtu,
7498 			   c->sec_level, c->mode);
7499 	}
7500 
7501 	read_unlock(&chan_list_lock);
7502 
7503 	return 0;
7504 }
7505 
/* debugfs open: bind the single-record seq_file show routine */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
7510 
/* File operations for the "l2cap" debugfs entry (seq_file based) */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
7517 
/* dentry of the "l2cap" debugfs file; NULL when never created */
static struct dentry *l2cap_debugfs;
7519 
7520 int __init l2cap_init(void)
7521 {
7522 	int err;
7523 
7524 	err = l2cap_init_sockets();
7525 	if (err < 0)
7526 		return err;
7527 
7528 	if (IS_ERR_OR_NULL(bt_debugfs))
7529 		return 0;
7530 
7531 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7532 					    NULL, &l2cap_debugfs_fops);
7533 
7534 	debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
7535 			   &le_max_credits);
7536 	debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
7537 			   &le_default_mps);
7538 
7539 	bt_6lowpan_init();
7540 
7541 	return 0;
7542 }
7543 
/* Module teardown: undo l2cap_init() in reverse order */
void l2cap_exit(void)
{
	bt_6lowpan_cleanup();
	debugfs_remove(l2cap_debugfs);
	l2cap_cleanup_sockets();
}
7550 
/* Allow disabling ERTM/streaming modes at load time or via sysfs */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
7553