xref: /linux/net/bluetooth/l2cap_core.c (revision 9d106c6dd81bb26ad7fc3ee89cb1d62557c8e2c9)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 #include "a2mp.h"
43 #include "amp.h"
44 
45 #define LE_FLOWCTL_MAX_CREDITS 65535
46 
47 bool disable_ertm;
48 bool enable_ecred;
49 
50 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
51 
52 static LIST_HEAD(chan_list);
53 static DEFINE_RWLOCK(chan_list_lock);
54 
55 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
56 				       u8 code, u8 ident, u16 dlen, void *data);
57 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
58 			   void *data);
59 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
60 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
61 
62 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
63 		     struct sk_buff_head *skbs, u8 event);
64 
65 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
66 {
67 	if (link_type == LE_LINK) {
68 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
69 			return BDADDR_LE_PUBLIC;
70 		else
71 			return BDADDR_LE_RANDOM;
72 	}
73 
74 	return BDADDR_BREDR;
75 }
76 
/* Socket address type of the local (source) side of @hcon */
static inline u8 bdaddr_src_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->src_type);
}
81 
/* Socket address type of the remote (destination) side of @hcon */
static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->dst_type);
}
86 
87 /* ---- L2CAP channels ---- */
88 
89 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
90 						   u16 cid)
91 {
92 	struct l2cap_chan *c;
93 
94 	list_for_each_entry(c, &conn->chan_l, list) {
95 		if (c->dcid == cid)
96 			return c;
97 	}
98 	return NULL;
99 }
100 
101 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
102 						   u16 cid)
103 {
104 	struct l2cap_chan *c;
105 
106 	list_for_each_entry(c, &conn->chan_l, list) {
107 		if (c->scid == cid)
108 			return c;
109 	}
110 	return NULL;
111 }
112 
113 /* Find channel with given SCID.
114  * Returns locked channel. */
115 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
116 						 u16 cid)
117 {
118 	struct l2cap_chan *c;
119 
120 	mutex_lock(&conn->chan_lock);
121 	c = __l2cap_get_chan_by_scid(conn, cid);
122 	if (c)
123 		l2cap_chan_lock(c);
124 	mutex_unlock(&conn->chan_lock);
125 
126 	return c;
127 }
128 
129 /* Find channel with given DCID.
130  * Returns locked channel.
131  */
132 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
133 						 u16 cid)
134 {
135 	struct l2cap_chan *c;
136 
137 	mutex_lock(&conn->chan_lock);
138 	c = __l2cap_get_chan_by_dcid(conn, cid);
139 	if (c)
140 		l2cap_chan_lock(c);
141 	mutex_unlock(&conn->chan_lock);
142 
143 	return c;
144 }
145 
146 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
147 						    u8 ident)
148 {
149 	struct l2cap_chan *c;
150 
151 	list_for_each_entry(c, &conn->chan_l, list) {
152 		if (c->ident == ident)
153 			return c;
154 	}
155 	return NULL;
156 }
157 
158 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
159 						  u8 ident)
160 {
161 	struct l2cap_chan *c;
162 
163 	mutex_lock(&conn->chan_lock);
164 	c = __l2cap_get_chan_by_ident(conn, ident);
165 	if (c)
166 		l2cap_chan_lock(c);
167 	mutex_unlock(&conn->chan_lock);
168 
169 	return c;
170 }
171 
172 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
173 						      u8 src_type)
174 {
175 	struct l2cap_chan *c;
176 
177 	list_for_each_entry(c, &chan_list, global_l) {
178 		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
179 			continue;
180 
181 		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
182 			continue;
183 
184 		if (c->sport == psm && !bacmp(&c->src, src))
185 			return c;
186 	}
187 	return NULL;
188 }
189 
190 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
191 {
192 	int err;
193 
194 	write_lock(&chan_list_lock);
195 
196 	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
197 		err = -EADDRINUSE;
198 		goto done;
199 	}
200 
201 	if (psm) {
202 		chan->psm = psm;
203 		chan->sport = psm;
204 		err = 0;
205 	} else {
206 		u16 p, start, end, incr;
207 
208 		if (chan->src_type == BDADDR_BREDR) {
209 			start = L2CAP_PSM_DYN_START;
210 			end = L2CAP_PSM_AUTO_END;
211 			incr = 2;
212 		} else {
213 			start = L2CAP_PSM_LE_DYN_START;
214 			end = L2CAP_PSM_LE_DYN_END;
215 			incr = 1;
216 		}
217 
218 		err = -EINVAL;
219 		for (p = start; p <= end; p += incr)
220 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
221 							 chan->src_type)) {
222 				chan->psm   = cpu_to_le16(p);
223 				chan->sport = cpu_to_le16(p);
224 				err = 0;
225 				break;
226 			}
227 	}
228 
229 done:
230 	write_unlock(&chan_list_lock);
231 	return err;
232 }
233 EXPORT_SYMBOL_GPL(l2cap_add_psm);
234 
/* Bind @chan to the fixed CID @scid, marking it as a fixed channel
 * and resetting the outgoing MTU.  Always returns 0.
 */
int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->chan_type = L2CAP_CHAN_FIXED;

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
249 
250 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
251 {
252 	u16 cid, dyn_end;
253 
254 	if (conn->hcon->type == LE_LINK)
255 		dyn_end = L2CAP_CID_LE_DYN_END;
256 	else
257 		dyn_end = L2CAP_CID_DYN_END;
258 
259 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
260 		if (!__l2cap_get_chan_by_scid(conn, cid))
261 			return cid;
262 	}
263 
264 	return 0;
265 }
266 
/* Move @chan to @state and notify the channel owner through the
 * state_change callback, with no error.
 */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
275 
/* As l2cap_state_change() but also reports @err to the channel owner */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}
282 
/* Report @err to the channel owner without changing the state */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
287 
/* (Re)arm the ERTM retransmission timer.  Skipped when the monitor
 * timer is already pending -- the two timers are mutually exclusive
 * (see __set_monitor_timer(), which stops the retransmission timer).
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
296 
/* Stop the retransmission timer and, if a monitor timeout is
 * configured, start the monitor timer in its place.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
305 
306 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
307 					       u16 seq)
308 {
309 	struct sk_buff *skb;
310 
311 	skb_queue_walk(head, skb) {
312 		if (bt_cb(skb)->l2cap.txseq == seq)
313 			return skb;
314 	}
315 
316 	return NULL;
317 }
318 
319 /* ---- L2CAP sequence number lists ---- */
320 
321 /* For ERTM, ordered lists of sequence numbers must be tracked for
322  * SREJ requests that are received and for frames that are to be
323  * retransmitted. These seq_list functions implement a singly-linked
324  * list in an array, where membership in the list can also be checked
325  * in constant time. Items can also be added to the tail of the list
326  * and removed from the head in constant time, without further memory
327  * allocs or frees.
328  */
329 
/* Allocate an empty sequence-number list able to hold @size entries.
 * Returns 0 on success or -ENOMEM.
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	/* Mark every slot "not a member" */
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
352 
/* Release the storage allocated by l2cap_seq_list_init() */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
357 
/* A slot holds L2CAP_SEQ_LIST_CLEAR only when that sequence number is
 * not currently on the list, so membership is a single array read.
 */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
364 
/* Remove and return the sequence number at the head of the list.
 * The head slot is consumed unconditionally, so callers must not pop
 * an empty list.  Popping the tail entry (slot == L2CAP_SEQ_LIST_TAIL)
 * leaves the list empty.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the next linked entry and free this slot */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
380 
/* Empty the list; O(1) if already empty, otherwise every slot is
 * reset since membership is tracked per slot.
 */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
394 
/* Append @seq to the tail of the list.  A sequence number already on
 * the list (slot in use) is silently ignored.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		/* Empty list: new entry is also the head */
		seq_list->head = seq;
	else
		/* Link the previous tail to the new entry */
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
412 
/* Delayed work run when the channel timer (armed via __set_chan_timer())
 * expires.  Closes the channel with an error that depends on the state
 * it was stuck in, then drops the reference taken when the timer was
 * scheduled.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* Lock order: conn->chan_lock is taken before the channel lock */
	mutex_lock(&conn->chan_lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	/* Drop the reference taken for this scheduled work */
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);
}
445 
/* Allocate and initialise a new channel and add it to the global
 * channel list.  Returns NULL on allocation failure.  The caller owns
 * the initial reference (released with l2cap_chan_put()).
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
477 
/* kref release callback: unlink the channel from the global list and
 * free it.  Runs when the last reference is dropped via
 * l2cap_chan_put().
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
490 
/* Take a reference on the channel */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));

	kref_get(&c->kref);
}
497 
/* Drop a reference; frees the channel when the last one goes away */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
505 
506 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
507 {
508 	chan->fcs  = L2CAP_FCS_CRC16;
509 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
510 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
511 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
512 	chan->remote_max_tx = chan->max_tx;
513 	chan->remote_tx_win = chan->tx_win;
514 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
515 	chan->sec_level = BT_SECURITY_LOW;
516 	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
517 	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
518 	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
519 	chan->conf_state = 0;
520 
521 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
522 }
523 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
524 
/* Reset LE credit-based flow control state for a new connection,
 * seeding our transmit budget with @tx_credits from the peer.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = tx_credits;
	/* Derive MPS from connection MTU to stop HCI fragmentation */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	/* Give enough credits for a full packet */
	chan->rx_credits = (chan->imtu / chan->mps) + 1;

	skb_queue_head_init(&chan->tx_q);
}
538 
/* As l2cap_le_flowctl_init() but enforcing the enhanced-credit-mode
 * minimum MPS; rx_credits is recomputed if MPS had to be raised.
 */
static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
{
	l2cap_le_flowctl_init(chan, tx_credits);

	/* L2CAP implementations shall support a minimum MPS of 64 octets */
	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
		chan->mps = L2CAP_ECRED_MIN_MPS;
		chan->rx_credits = (chan->imtu / chan->mps) + 1;
	}
}
549 
/* Attach @chan to @conn, assigning CIDs and MTU according to the
 * channel type, and take the references the attached channel needs.
 * Caller must hold conn->chan_lock (see l2cap_chan_add()).
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Default extended flow spec (best effort) parameters */
	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* Reference held by conn->chan_l; dropped in l2cap_chan_del() */
	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
601 
/* Locked wrapper around __l2cap_chan_add() */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
608 
/* Detach @chan from its connection and tear it down, reporting @err
 * to the channel owner.  Undoes the references taken in
 * __l2cap_chan_add() and, unless configuration never completed,
 * stops all ERTM timers and frees any queued data.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drop the reference held by conn->chan_l */
		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	/* Nothing was queued/armed yet if configuration never completed */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch(chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);

		/* fall through */

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}

	return;
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
680 
/* Worker that copies the HCI connection's current destination address
 * (and its type) into every channel on the connection.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_update_work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
699 
700 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
701 {
702 	struct l2cap_conn *conn = chan->conn;
703 	struct l2cap_le_conn_rsp rsp;
704 	u16 result;
705 
706 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
707 		result = L2CAP_CR_LE_AUTHORIZATION;
708 	else
709 		result = L2CAP_CR_LE_BAD_PSM;
710 
711 	l2cap_state_change(chan, BT_DISCONN);
712 
713 	rsp.dcid    = cpu_to_le16(chan->scid);
714 	rsp.mtu     = cpu_to_le16(chan->imtu);
715 	rsp.mps     = cpu_to_le16(chan->mps);
716 	rsp.credits = cpu_to_le16(chan->rx_credits);
717 	rsp.result  = cpu_to_le16(result);
718 
719 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
720 		       &rsp);
721 }
722 
723 static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
724 {
725 	struct l2cap_conn *conn = chan->conn;
726 	struct l2cap_ecred_conn_rsp rsp;
727 	u16 result;
728 
729 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
730 		result = L2CAP_CR_LE_AUTHORIZATION;
731 	else
732 		result = L2CAP_CR_LE_BAD_PSM;
733 
734 	l2cap_state_change(chan, BT_DISCONN);
735 
736 	memset(&rsp, 0, sizeof(rsp));
737 
738 	rsp.result  = cpu_to_le16(result);
739 
740 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
741 		       &rsp);
742 }
743 
744 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
745 {
746 	struct l2cap_conn *conn = chan->conn;
747 	struct l2cap_conn_rsp rsp;
748 	u16 result;
749 
750 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
751 		result = L2CAP_CR_SEC_BLOCK;
752 	else
753 		result = L2CAP_CR_BAD_PSM;
754 
755 	l2cap_state_change(chan, BT_DISCONN);
756 
757 	rsp.scid   = cpu_to_le16(chan->dcid);
758 	rsp.dcid   = cpu_to_le16(chan->scid);
759 	rsp.result = cpu_to_le16(result);
760 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
761 
762 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
763 }
764 
/* Close @chan according to its current state: send the appropriate
 * disconnect or reject PDU where a peer is involved, otherwise just
 * tear the channel down.  Callers hold the channel lock (see e.g.
 * l2cap_chan_timeout()).
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		/* Established conn-oriented channels need an orderly
		 * disconnect; arm a timer in case the peer never replies.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* Incoming request not yet accepted: send a reject
		 * matching the transport and channel mode.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK) {
				switch (chan->mode) {
				case L2CAP_MODE_LE_FLOWCTL:
					l2cap_chan_le_connect_reject(chan);
					break;
				case L2CAP_MODE_EXT_FLOWCTL:
					l2cap_chan_ecred_connect_reject(chan);
					break;
				}
			}
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
815 
/* Map channel type, PSM and security level to the HCI authentication
 * requirement used when requesting security on the ACL link.  May
 * upgrade sec_level from LOW to SDP for the SDP and 3DSP PSMs.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* Non-SDP conn-oriented channels use the default mapping */
		/* fall through */
	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
866 
867 /* Service level security */
868 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
869 {
870 	struct l2cap_conn *conn = chan->conn;
871 	__u8 auth_type;
872 
873 	if (conn->hcon->type == LE_LINK)
874 		return smp_conn_security(conn->hcon, chan->sec_level);
875 
876 	auth_type = l2cap_get_auth_type(chan);
877 
878 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
879 				 initiator);
880 }
881 
882 static u8 l2cap_get_ident(struct l2cap_conn *conn)
883 {
884 	u8 id;
885 
886 	/* Get next available identificator.
887 	 *    1 - 128 are used by kernel.
888 	 *  129 - 199 are reserved.
889 	 *  200 - 254 are used by utilities like l2ping, etc.
890 	 */
891 
892 	mutex_lock(&conn->ident_lock);
893 
894 	if (++conn->tx_ident > 128)
895 		conn->tx_ident = 1;
896 
897 	id = conn->tx_ident;
898 
899 	mutex_unlock(&conn->ident_lock);
900 
901 	return id;
902 }
903 
/* Build and transmit a signalling command on @conn.  The command is
 * silently dropped if the skb cannot be built.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	/* Signalling traffic gets top priority and wakes the radio */
	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
928 
929 static bool __chan_is_moving(struct l2cap_chan *chan)
930 {
931 	return chan->move_state != L2CAP_MOVE_STABLE &&
932 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
933 }
934 
/* Hand one outbound PDU for @chan to the HCI layer.  If a high-speed
 * (AMP) connection is set up and the channel is not mid-move, the PDU
 * goes out on the AMP logical link instead (or is dropped if that
 * link is gone).
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb);

		return;
	}

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
966 
967 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
968 {
969 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
970 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
971 
972 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
973 		/* S-Frame */
974 		control->sframe = 1;
975 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
976 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
977 
978 		control->sar = 0;
979 		control->txseq = 0;
980 	} else {
981 		/* I-Frame */
982 		control->sframe = 0;
983 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
984 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
985 
986 		control->poll = 0;
987 		control->super = 0;
988 	}
989 }
990 
991 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
992 {
993 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
994 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
995 
996 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
997 		/* S-Frame */
998 		control->sframe = 1;
999 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
1000 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
1001 
1002 		control->sar = 0;
1003 		control->txseq = 0;
1004 	} else {
1005 		/* I-Frame */
1006 		control->sframe = 0;
1007 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
1008 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1009 
1010 		control->poll = 0;
1011 		control->super = 0;
1012 	}
1013 }
1014 
1015 static inline void __unpack_control(struct l2cap_chan *chan,
1016 				    struct sk_buff *skb)
1017 {
1018 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1019 		__unpack_extended_control(get_unaligned_le32(skb->data),
1020 					  &bt_cb(skb)->l2cap);
1021 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1022 	} else {
1023 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
1024 					  &bt_cb(skb)->l2cap);
1025 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1026 	}
1027 }
1028 
1029 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1030 {
1031 	u32 packed;
1032 
1033 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1034 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1035 
1036 	if (control->sframe) {
1037 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1038 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1039 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1040 	} else {
1041 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1042 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1043 	}
1044 
1045 	return packed;
1046 }
1047 
1048 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1049 {
1050 	u16 packed;
1051 
1052 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1053 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1054 
1055 	if (control->sframe) {
1056 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1057 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1058 		packed |= L2CAP_CTRL_FRAME_TYPE;
1059 	} else {
1060 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1061 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1062 	}
1063 
1064 	return packed;
1065 }
1066 
1067 static inline void __pack_control(struct l2cap_chan *chan,
1068 				  struct l2cap_ctrl *control,
1069 				  struct sk_buff *skb)
1070 {
1071 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1072 		put_unaligned_le32(__pack_extended_control(control),
1073 				   skb->data + L2CAP_HDR_SIZE);
1074 	} else {
1075 		put_unaligned_le16(__pack_enhanced_control(control),
1076 				   skb->data + L2CAP_HDR_SIZE);
1077 	}
1078 }
1079 
1080 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1081 {
1082 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1083 		return L2CAP_EXT_HDR_SIZE;
1084 	else
1085 		return L2CAP_ENH_HDR_SIZE;
1086 }
1087 
/* Allocate and fill a complete S-frame PDU for @chan: basic header,
 * the already-encoded @control field, and FCS when CRC16 is in use.
 * Returns the skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	/* S-frames carry no payload: length covers control (+FCS) only */
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything written so far */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1120 
/* Build and transmit a single S-frame described by @control. No-op for
 * I-frame control structs and while the channel is mid-move (AMP).
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	/* A pending F-bit is piggy-backed on any non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	/* Track whether the peer currently sees us as busy (RNR sent) */
	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* RR/RNR acknowledge up to reqseq, so the ack timer can stop;
	 * an SREJ does not acknowledge anything.
	 */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1161 
1162 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1163 {
1164 	struct l2cap_ctrl control;
1165 
1166 	BT_DBG("chan %p, poll %d", chan, poll);
1167 
1168 	memset(&control, 0, sizeof(control));
1169 	control.sframe = 1;
1170 	control.poll = poll;
1171 
1172 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1173 		control.super = L2CAP_SUPER_RNR;
1174 	else
1175 		control.super = L2CAP_SUPER_RR;
1176 
1177 	control.reqseq = chan->buffer_seq;
1178 	l2cap_send_sframe(chan, &control);
1179 }
1180 
1181 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1182 {
1183 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1184 		return true;
1185 
1186 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1187 }
1188 
1189 static bool __amp_capable(struct l2cap_chan *chan)
1190 {
1191 	struct l2cap_conn *conn = chan->conn;
1192 	struct hci_dev *hdev;
1193 	bool amp_available = false;
1194 
1195 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1196 		return false;
1197 
1198 	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1199 		return false;
1200 
1201 	read_lock(&hci_dev_list_lock);
1202 	list_for_each_entry(hdev, &hci_dev_list, list) {
1203 		if (hdev->amp_type != AMP_TYPE_BREDR &&
1204 		    test_bit(HCI_UP, &hdev->flags)) {
1205 			amp_available = true;
1206 			break;
1207 		}
1208 	}
1209 	read_unlock(&hci_dev_list_lock);
1210 
1211 	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1212 		return amp_available;
1213 
1214 	return false;
1215 }
1216 
/* Validate the channel's Extended Flow Specification parameters.
 * TODO: currently a stub that accepts every parameter combination.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
1222 
1223 void l2cap_send_conn_req(struct l2cap_chan *chan)
1224 {
1225 	struct l2cap_conn *conn = chan->conn;
1226 	struct l2cap_conn_req req;
1227 
1228 	req.scid = cpu_to_le16(chan->scid);
1229 	req.psm  = chan->psm;
1230 
1231 	chan->ident = l2cap_get_ident(conn);
1232 
1233 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1234 
1235 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1236 }
1237 
1238 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1239 {
1240 	struct l2cap_create_chan_req req;
1241 	req.scid = cpu_to_le16(chan->scid);
1242 	req.psm  = chan->psm;
1243 	req.amp_id = amp_id;
1244 
1245 	chan->ident = l2cap_get_ident(chan->conn);
1246 
1247 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1248 		       sizeof(req), &req);
1249 }
1250 
/* Prepare an ERTM channel for a move to another controller: stop all
 * timers, reset retransmission bookkeeping and park the TX/RX state
 * machines in their move-specific states until the move completes.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Reset the retry count of every frame sent at least once; stop
	 * at the first never-sent frame (retries == 0), since the tx
	 * queue is ordered and nothing after it has been sent either.
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->l2cap.retries)
			bt_cb(skb)->l2cap.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Hold off transmission until the move has finished */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1285 
1286 static void l2cap_move_done(struct l2cap_chan *chan)
1287 {
1288 	u8 move_role = chan->move_role;
1289 	BT_DBG("chan %p", chan);
1290 
1291 	chan->move_state = L2CAP_MOVE_STABLE;
1292 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1293 
1294 	if (chan->mode != L2CAP_MODE_ERTM)
1295 		return;
1296 
1297 	switch (move_role) {
1298 	case L2CAP_MOVE_ROLE_INITIATOR:
1299 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1300 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1301 		break;
1302 	case L2CAP_MOVE_ROLE_RESPONDER:
1303 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1304 		break;
1305 	}
1306 }
1307 
/* Transition a channel to BT_CONNECTED and notify its owner via the
 * ops->ready() callback. Idempotent for already-connected channels.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Credit-based modes cannot transmit without tx credits,
		 * so start out suspended when none were granted.
		 */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);
		break;
	}

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1333 
/* Send an LE credit-based connection request for this channel. Issued
 * at most once per channel (guarded by FLAG_LE_CONN_REQ_SENT).
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	/* Fall back to the link MTU when no incoming MTU was configured */
	if (!chan->imtu)
		chan->imtu = chan->conn->mtu;

	l2cap_le_flowctl_init(chan, 0);

	req.psm     = chan->psm;
	req.scid    = cpu_to_le16(chan->scid);
	req.mtu     = cpu_to_le16(chan->imtu);
	req.mps     = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1358 
/* Send an Enhanced Credit Based (ECRED) connection request for this
 * channel. Issued at most once (guarded by FLAG_ECRED_CONN_REQ_SENT).
 */
static void l2cap_ecred_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	/* The request PDU is the fixed header followed directly by one
	 * source CID on the wire, hence the packed aggregate.
	 */
	struct {
		struct l2cap_ecred_conn_req req;
		__le16 scid;
	} __packed pdu;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	pdu.req.psm     = chan->psm;
	pdu.req.mtu     = cpu_to_le16(chan->imtu);
	pdu.req.mps     = cpu_to_le16(chan->mps);
	pdu.req.credits = cpu_to_le16(chan->rx_credits);
	pdu.scid        = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
		       sizeof(pdu), &pdu);
}
1383 
1384 static void l2cap_le_start(struct l2cap_chan *chan)
1385 {
1386 	struct l2cap_conn *conn = chan->conn;
1387 
1388 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1389 		return;
1390 
1391 	if (!chan->psm) {
1392 		l2cap_chan_ready(chan);
1393 		return;
1394 	}
1395 
1396 	if (chan->state == BT_CONNECT) {
1397 		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1398 			l2cap_ecred_connect(chan);
1399 		else
1400 			l2cap_le_connect(chan);
1401 	}
1402 }
1403 
1404 static void l2cap_start_connection(struct l2cap_chan *chan)
1405 {
1406 	if (__amp_capable(chan)) {
1407 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1408 		a2mp_discover_amp(chan);
1409 	} else if (chan->conn->hcon->type == LE_LINK) {
1410 		l2cap_le_start(chan);
1411 	} else {
1412 		l2cap_send_conn_req(chan);
1413 	}
1414 }
1415 
/* Start the information request procedure (feature mask query) for a
 * connection; runs at most once (guarded by the REQ_SENT state bit).
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	/* Proceed without feature info if the peer never answers;
	 * see l2cap_info_timeout().
	 */
	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
1433 
1434 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1435 {
1436 	/* The minimum encryption key size needs to be enforced by the
1437 	 * host stack before establishing any L2CAP connections. The
1438 	 * specification in theory allows a minimum of 1, but to align
1439 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1440 	 *
1441 	 * This check might also be called for unencrypted connections
1442 	 * that have no key size requirements. Ensure that the link is
1443 	 * actually encrypted before enforcing a key size.
1444 	 */
1445 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1446 		hcon->enc_key_size >= hcon->hdev->min_enc_key_size);
1447 }
1448 
/* Drive a channel's connection establishment as far as the current
 * connection state allows. LE goes straight to l2cap_le_start(); on
 * BR/EDR the feature-mask exchange must complete first, then channel
 * security and the encryption key size are checked before the connect
 * request (or AMP discovery) is sent.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	/* Kick off the info exchange; l2cap_conn_start() retries the
	 * channel once the response (or its timeout) arrives.
	 */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (!l2cap_chan_check_security(chan, true) ||
	    !__l2cap_no_conn_pending(chan))
		return;

	if (l2cap_check_enc_key_size(conn->hcon))
		l2cap_start_connection(chan);
	else
		/* Encryption key too short: schedule a disconnect */
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
1475 
1476 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1477 {
1478 	u32 local_feat_mask = l2cap_feat_mask;
1479 	if (!disable_ertm)
1480 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1481 
1482 	switch (mode) {
1483 	case L2CAP_MODE_ERTM:
1484 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1485 	case L2CAP_MODE_STREAMING:
1486 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1487 	default:
1488 		return 0x00;
1489 	}
1490 }
1491 
/* Tear a channel down with @err: stop ERTM timers, send an L2CAP
 * Disconnection Request (A2MP channels only change state — they are
 * managed out of band) and move the channel to BT_DISCONN.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1518 
1519 /* ---- L2CAP connections ---- */
/* Walk every channel on the connection and push it forward: channels
 * in BT_CONNECT get their connect request sent (once security and the
 * info exchange allow), channels in BT_CONNECT2 get their pending
 * connect response sent. Called once the feature-mask exchange has
 * finished or timed out. Runs under conn->chan_lock, taking each
 * channel lock in turn.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		/* Connectionless/fixed channels are ready as soon as the
		 * link itself is up.
		 */
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* A state-2 device cannot fall back to basic mode,
			 * so close the channel if its mode is unsupported.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			if (l2cap_check_enc_key_size(conn->hcon))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Let the owner decide; answer with
					 * "pending - authorization" for now.
					 */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only a successful response moves on to the
			 * configuration phase, and only once.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1599 
/* LE link just came up: trigger pending security and, if we are the
 * slave and the negotiated connection interval is out of our accepted
 * range, request a connection parameter update.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE slave connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1632 
/* The underlying link is up: start the BR/EDR info exchange, advance
 * every channel that can make progress, run LE-specific post-connect
 * work, and release any rx packets that were queued while the link was
 * still being set up.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels follow their own state machine */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed channels must wait for the feature-mask
			 * exchange before being declared ready.
			 */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Process rx traffic that arrived before the conn was ready */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1673 
/* Notify sockets that we cannot guarantee reliability anymore */
1675 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1676 {
1677 	struct l2cap_chan *chan;
1678 
1679 	BT_DBG("conn %p", conn);
1680 
1681 	mutex_lock(&conn->chan_lock);
1682 
1683 	list_for_each_entry(chan, &conn->chan_l, list) {
1684 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1685 			l2cap_chan_set_err(chan, err);
1686 	}
1687 
1688 	mutex_unlock(&conn->chan_lock);
1689 }
1690 
/* Info request timed out: mark the feature exchange as done anyway so
 * pending channels are not stalled forever, then kick off any queued
 * connection attempts.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1701 
1702 /*
1703  * l2cap_user
1704  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1705  * callback is called during registration. The ->remove callback is called
1706  * during unregistration.
1707  * An l2cap_user object can either be explicitly unregistered or when the
1708  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1709  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1710  * External modules must own a reference to the l2cap_conn object if they intend
1711  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1712  * any time if they don't.
1713  */
1714 
/* Register an l2cap_user on the connection. Returns 0 on success,
 * -EINVAL if the user is already registered somewhere, -ENODEV if the
 * connection has already been torn down, or the user's probe() error.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* A non-empty list node means the user is registered already */
	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1752 
/* Unregister an l2cap_user and invoke its remove() callback. Safe to
 * call for users that were never (or already un-) registered: an empty
 * list node makes this a no-op.
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	if (list_empty(&user->list))
		goto out_unlock;

	list_del_init(&user->list);
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(l2cap_unregister_user);
1769 
1770 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1771 {
1772 	struct l2cap_user *user;
1773 
1774 	while (!list_empty(&conn->users)) {
1775 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1776 		list_del_init(&user->list);
1777 		user->remove(conn, user);
1778 	}
1779 }
1780 
/* Destroy the l2cap_conn attached to @hcon: cancel pending work, drop
 * all registered users, close every channel with @err, and release the
 * HCI channel and the conn reference. The teardown order matters —
 * rx work is stopped before channels are killed, and the hci_chan is
 * dropped only after all channels are gone.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame */
	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	if (work_pending(&conn->id_addr_update_work))
		cancel_work_sync(&conn->id_addr_update_work);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold a ref so the channel outlives l2cap_chan_del() */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		chan->ops->close(chan);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1836 
/* kref release callback: drop the hci_conn reference the l2cap_conn
 * held and free the connection object itself.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1844 
/* Take an additional reference on @conn; returns @conn for chaining. */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);
1851 
/* Drop a reference on @conn; frees it via l2cap_conn_free() when the
 * last reference goes away.
 */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1857 
1858 /* ---- Socket interface ---- */
1859 
1860 /* Find socket with psm and source / destination bdaddr.
1861  * Returns closest match.
1862  */
/* Look up a global channel by PSM and source/destination address for
 * the given link type. An exact address match wins; otherwise the
 * closest wildcard (BDADDR_ANY) match is returned. The returned
 * channel has its refcount raised (caller must l2cap_chan_put()), or
 * NULL when nothing matches.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		/* Address type must agree with the link transport */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				l2cap_chan_hold(c);
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		l2cap_chan_hold(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
1911 
1912 static void l2cap_monitor_timeout(struct work_struct *work)
1913 {
1914 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1915 					       monitor_timer.work);
1916 
1917 	BT_DBG("chan %p", chan);
1918 
1919 	l2cap_chan_lock(chan);
1920 
1921 	if (!chan->conn) {
1922 		l2cap_chan_unlock(chan);
1923 		l2cap_chan_put(chan);
1924 		return;
1925 	}
1926 
1927 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1928 
1929 	l2cap_chan_unlock(chan);
1930 	l2cap_chan_put(chan);
1931 }
1932 
1933 static void l2cap_retrans_timeout(struct work_struct *work)
1934 {
1935 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1936 					       retrans_timer.work);
1937 
1938 	BT_DBG("chan %p", chan);
1939 
1940 	l2cap_chan_lock(chan);
1941 
1942 	if (!chan->conn) {
1943 		l2cap_chan_unlock(chan);
1944 		l2cap_chan_put(chan);
1945 		return;
1946 	}
1947 
1948 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1949 	l2cap_chan_unlock(chan);
1950 	l2cap_chan_put(chan);
1951 }
1952 
/* Transmit queued I-frames in streaming mode: frames are sent exactly
 * once (no retransmission), each stamped with the next tx sequence
 * number, control field and optional CRC16 FCS.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	if (__chan_is_moving(chan))
		return;

	/* Append the new frames behind anything already queued */
	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Streaming mode never acknowledges received frames */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1991 
/* Transmit as many pending I-frames as the ERTM tx window allows.
 * Each frame is stamped with fresh sequence numbers, packed, FCS'd,
 * then a clone is sent so the original stays queued for possible
 * retransmission. Returns the number of frames sent, 0 when blocked
 * (remote busy, mid-move, window full), or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Piggy-back a pending F-bit on the outgoing I-frame */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Each I-frame also acknowledges everything received */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head; NULL means the queue tail */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2061 
/* Retransmit every frame queued on chan->retrans_list. Each frame's
 * control field is refreshed (current reqseq, possibly the F-bit) and
 * the FCS recomputed before sending a clone/copy of the stored skb.
 * Exceeding max_tx retries triggers a disconnect.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		/* Give up on the channel after max_tx attempts (0 means
		 * unlimited retransmissions).
		 */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the ack field; optionally carry the F-bit */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
						L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2142 
/* Queue the single frame identified by control->reqseq for
 * retransmission and push it out immediately.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2151 
/* Retransmit every unacked frame starting at control->reqseq (REJ/poll
 * recovery): rebuild the retransmission list from the tx queue, then
 * send. A poll request also arms the F-bit for the next frame out.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Start from a clean list; it is rebuilt below */
	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame at or after reqseq (bounded by
		 * tx_send_head, where unsent frames begin).
		 */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything from there up to the first unsent frame */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2185 
/* Acknowledge received I-frames: send RNR when locally busy, otherwise
 * try to piggy-back the ack on outgoing I-frames, send an explicit RR
 * when the receive window is 3/4 full, or defer via the ack timer.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Locally busy: tell the peer to stop sending */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Below threshold: defer the ack via the ack timer */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2235 
/* Copy 'len' bytes of user data from msg into skb: the first 'count'
 * bytes go into skb's linear area (already sized by the caller), the
 * remainder into continuation skbs chained on skb's frag_list, each
 * capped at the HCI MTU.  Returns bytes consumed or a negative errno.
 * On error the caller frees skb, which also frees any fragments.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		/* Zero headroom: fragments carry payload only */
		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
				   &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len  -= count;

		/* Keep the parent skb's length accounting consistent
		 * with the data held in its fragment list.
		 */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2279 
/* Build a connectionless (G-frame) PDU: basic L2CAP header followed by
 * the 2-byte PSM, then the user payload copied from msg.  Returns the
 * skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* First fragment holds the header plus whatever payload fits in
	 * one HCI fragment; the rest is chained by
	 * l2cap_skbuff_fromiovec().
	 */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* Length covers the PSM field plus the payload */
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2311 
2312 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2313 					      struct msghdr *msg, size_t len)
2314 {
2315 	struct l2cap_conn *conn = chan->conn;
2316 	struct sk_buff *skb;
2317 	int err, count;
2318 	struct l2cap_hdr *lh;
2319 
2320 	BT_DBG("chan %p len %zu", chan, len);
2321 
2322 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2323 
2324 	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2325 				   msg->msg_flags & MSG_DONTWAIT);
2326 	if (IS_ERR(skb))
2327 		return skb;
2328 
2329 	/* Create L2CAP header */
2330 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2331 	lh->cid = cpu_to_le16(chan->dcid);
2332 	lh->len = cpu_to_le16(len);
2333 
2334 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2335 	if (unlikely(err < 0)) {
2336 		kfree_skb(skb);
2337 		return ERR_PTR(err);
2338 	}
2339 	return skb;
2340 }
2341 
/* Build an ERTM/streaming I-frame PDU: L2CAP header, a zeroed control
 * field (filled in at transmit time), an optional SDU length field for
 * the first segment of a segmented SDU, then the payload.  Room for an
 * FCS is accounted for in the header length when CRC16 is in use.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Basic header plus enhanced or extended control field */
	hlen = __ertm_hdr_size(chan);

	/* Non-zero sdulen marks a SAR start fragment */
	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* Length covers control, optional SDU len, FCS and payload */
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2395 
/* Split an SDU into one or more I-frame PDUs, tagging each with the
 * appropriate SAR value (UNSEGMENTED, START, CONTINUE, END), and queue
 * them on seg_queue.  Returns 0 on success or a negative errno, in
 * which case seg_queue has been purged.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU: no SDU length field needed */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First segment carries the total SDU length */
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the START fragment carries the SDU length */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2462 
/* Build an LE flow control (K-frame) PDU: L2CAP header, an optional
 * SDU length field for the first segment, then the payload.  Returns
 * the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	/* Non-zero sdulen marks the first segment of the SDU */
	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* Length covers the optional SDU length field plus the payload */
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2505 
/* Split an SDU into LE flow control PDUs sized to the remote MPS and
 * queue them on seg_queue.  The first PDU carries the total SDU length
 * and therefore has L2CAP_SDULEN_SIZE less room for payload.  Returns
 * 0 on success or a negative errno (seg_queue purged on failure).
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	/* Reserve room for the SDU length field in the first PDU */
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		/* Subsequent PDUs have no SDU length field, so they can
		 * carry L2CAP_SDULEN_SIZE more payload bytes.
		 */
		if (sdu_len) {
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2541 
2542 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2543 {
2544 	int sent = 0;
2545 
2546 	BT_DBG("chan %p", chan);
2547 
2548 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2549 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2550 		chan->tx_credits--;
2551 		sent++;
2552 	}
2553 
2554 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2555 	       skb_queue_len(&chan->tx_q));
2556 }
2557 
/* Send an SDU on a channel, dispatching on channel type and mode.
 * Returns the number of bytes accepted or a negative errno.  The
 * alloc_skb callback may release and reacquire the channel lock,
 * which is why the channel state is rechecked after each PDU build.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have been closed while the lock was
		 * dropped during segmentation; purge and bail out.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		l2cap_le_flowctl_send(chan);

		/* Out of credits: apply backpressure to the caller */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2685 
/* Send an SREJ S-frame for every missing sequence number between the
 * expected tx seq and the just-received txseq, recording each on the
 * srej_list, then advance expected_tx_seq past txseq.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Skip frames already buffered out of order in srej_q */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2708 
2709 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2710 {
2711 	struct l2cap_ctrl control;
2712 
2713 	BT_DBG("chan %p", chan);
2714 
2715 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2716 		return;
2717 
2718 	memset(&control, 0, sizeof(control));
2719 	control.sframe = 1;
2720 	control.super = L2CAP_SUPER_SREJ;
2721 	control.reqseq = chan->srej_list.tail;
2722 	l2cap_send_sframe(chan, &control);
2723 }
2724 
/* Re-send SREJ S-frames for every still-missing frame on the
 * srej_list, stopping at txseq.  Each popped entry (other than txseq)
 * is re-appended, so the list is effectively rotated once.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		/* txseq just arrived, so it is dropped rather than
		 * re-requested; an empty list terminates the loop too.
		 */
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2750 
/* Process a received acknowledgment: free every tx_q frame with a
 * sequence number before reqseq, update expected_ack_seq, and stop the
 * retransmission timer once nothing is left unacked.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or this ack adds no new information */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	/* All sent frames acknowledged: no retransmission pending */
	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2782 
/* Abandon the SREJ_SENT receive state: forget all outstanding SREJ
 * requests and buffered out-of-order frames, rewind expected_tx_seq to
 * the last in-order frame, and return to the plain RECV state.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2792 
/* ERTM transmit state machine, XMIT state: normal transmission is
 * allowed.  Events that start a poll (explicit poll, retransmission
 * timeout) move the channel to WAIT_F.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		/* Queue the new segments and transmit what the window
		 * currently allows.
		 */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* l2cap_send_ack() emits RNR while CONN_LOCAL_BUSY is set */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* We told the peer we were busy; poll it with an
			 * RR(P=1) and wait for its F-bit response.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timer fired: poll the peer for its
		 * receive state and await the F-bit in WAIT_F.
		 */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2864 
2865 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2866 				  struct l2cap_ctrl *control,
2867 				  struct sk_buff_head *skbs, u8 event)
2868 {
2869 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2870 	       event);
2871 
2872 	switch (event) {
2873 	case L2CAP_EV_DATA_REQUEST:
2874 		if (chan->tx_send_head == NULL)
2875 			chan->tx_send_head = skb_peek(skbs);
2876 		/* Queue data, but don't send. */
2877 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2878 		break;
2879 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2880 		BT_DBG("Enter LOCAL_BUSY");
2881 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2882 
2883 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2884 			/* The SREJ_SENT state must be aborted if we are to
2885 			 * enter the LOCAL_BUSY state.
2886 			 */
2887 			l2cap_abort_rx_srej_sent(chan);
2888 		}
2889 
2890 		l2cap_send_ack(chan);
2891 
2892 		break;
2893 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2894 		BT_DBG("Exit LOCAL_BUSY");
2895 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2896 
2897 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2898 			struct l2cap_ctrl local_control;
2899 			memset(&local_control, 0, sizeof(local_control));
2900 			local_control.sframe = 1;
2901 			local_control.super = L2CAP_SUPER_RR;
2902 			local_control.poll = 1;
2903 			local_control.reqseq = chan->buffer_seq;
2904 			l2cap_send_sframe(chan, &local_control);
2905 
2906 			chan->retry_count = 1;
2907 			__set_monitor_timer(chan);
2908 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2909 		}
2910 		break;
2911 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2912 		l2cap_process_reqseq(chan, control->reqseq);
2913 
2914 		/* Fall through */
2915 
2916 	case L2CAP_EV_RECV_FBIT:
2917 		if (control && control->final) {
2918 			__clear_monitor_timer(chan);
2919 			if (chan->unacked_frames > 0)
2920 				__set_retrans_timer(chan);
2921 			chan->retry_count = 0;
2922 			chan->tx_state = L2CAP_TX_STATE_XMIT;
2923 			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2924 		}
2925 		break;
2926 	case L2CAP_EV_EXPLICIT_POLL:
2927 		/* Ignore */
2928 		break;
2929 	case L2CAP_EV_MONITOR_TO:
2930 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2931 			l2cap_send_rr_or_rnr(chan, 1);
2932 			__set_monitor_timer(chan);
2933 			chan->retry_count++;
2934 		} else {
2935 			l2cap_send_disconn_req(chan, ECONNABORTED);
2936 		}
2937 		break;
2938 	default:
2939 		break;
2940 	}
2941 }
2942 
2943 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2944 		     struct sk_buff_head *skbs, u8 event)
2945 {
2946 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2947 	       chan, control, skbs, event, chan->tx_state);
2948 
2949 	switch (chan->tx_state) {
2950 	case L2CAP_TX_STATE_XMIT:
2951 		l2cap_tx_state_xmit(chan, control, skbs, event);
2952 		break;
2953 	case L2CAP_TX_STATE_WAIT_F:
2954 		l2cap_tx_state_wait_f(chan, control, skbs, event);
2955 		break;
2956 	default:
2957 		/* Ignore event */
2958 		break;
2959 	}
2960 }
2961 
/* Forward a received frame's reqseq and F-bit to the tx state machine */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2968 
/* Forward only a received frame's F-bit to the tx state machine */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2975 
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the channel it came from */
		if (bt_cb(skb)->l2cap.chan == chan)
			continue;

		/* Best effort: a failed clone just skips this channel */
		nskb = skb_clone(skb, GFP_KERNEL);
		if (!nskb)
			continue;
		/* recv takes ownership on success; free on failure */
		if (chan->ops->recv(chan, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}
3003 
3004 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb: L2CAP header (CID chosen by link
 * type), command header, then dlen bytes of payload.  Payload beyond
 * the first HCI fragment is chained on the frag_list.  Returns NULL on
 * allocation failure or when even the headers cannot fit in the MTU.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE and BR/EDR use different fixed signalling channels */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the rest of the first fragment with payload */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		skb_put_data(skb, data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		skb_put_data(*frag, data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees any continuation fragments already chained */
	kfree_skb(skb);
	return NULL;
}
3070 
/* Parse one configuration option at *ptr, returning its type, length
 * and value, and advance *ptr past it.  For lengths other than 1/2/4,
 * *val is a pointer into the option buffer itself.  Returns the total
 * number of bytes consumed.
 *
 * NOTE(review): opt->len comes from the peer and is not bounds-checked
 * here; callers must ensure the remaining buffer actually holds
 * L2CAP_CONF_OPT_SIZE + opt->len bytes — verify at call sites.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer to it */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3104 
/* Append one configuration option at *ptr and advance *ptr past it.
 * For lengths other than 1/2/4, val is treated as a pointer to the
 * option data.  Options that would overflow the remaining 'size' bytes
 * are silently dropped.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	/* Not enough room left in the response buffer: skip the option */
	if (size < L2CAP_CONF_OPT_SIZE + len)
		return;

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length option: val is a pointer to the data */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3137 
/* Append an Extended Flow Specification option built from the
 * channel's local parameters.  Only ERTM and streaming modes carry an
 * EFS; any other mode adds nothing.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id		= chan->local_id;
		efs.stype	= chan->local_stype;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		/* Streaming mode uses fixed best-effort service values */
		efs.id		= 1;
		efs.stype	= L2CAP_SERV_BESTEFFORT;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= 0;
		efs.flush_to	= 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs, size);
}
3168 
/* Delayed-work handler for the ack timer: send an RR (or RNR) if any
 * received frames are still unacknowledged when the timer fires.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	/* Drop the channel reference held for this delayed work —
	 * presumably taken when the timer was armed; confirm against
	 * __set_ack_timer.
	 */
	l2cap_chan_put(chan);
}
3188 
/* Reset a channel's sequence state and, for ERTM mode, initialise the
 * timers, SREJ queue and sequence lists.  Returns 0 on success or a
 * negative errno if a sequence list cannot be allocated.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* AMP channel-move state starts out on the BR/EDR controller */
	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	/* Streaming and other modes need none of the ERTM machinery */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Undo the first allocation so nothing leaks on failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3233 
3234 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3235 {
3236 	switch (mode) {
3237 	case L2CAP_MODE_STREAMING:
3238 	case L2CAP_MODE_ERTM:
3239 		if (l2cap_mode_supported(mode, remote_feat_mask))
3240 			return mode;
3241 		/* fall through */
3242 	default:
3243 		return L2CAP_MODE_BASIC;
3244 	}
3245 }
3246 
3247 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3248 {
3249 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3250 		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3251 }
3252 
3253 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3254 {
3255 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3256 		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3257 }
3258 
/* Fill in the retransmission and monitor timeouts of an RFC option:
 * derived from the AMP controller's best-effort flush timeout when the
 * channel lives on an AMP link, otherwise the spec defaults.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		/* Flush timeout is reported in microseconds */
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices have must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit range of the RFC option fields */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3296 
3297 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3298 {
3299 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3300 	    __l2cap_ews_supported(chan->conn)) {
3301 		/* use extended control field */
3302 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3303 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3304 	} else {
3305 		chan->tx_win = min_t(u16, chan->tx_win,
3306 				     L2CAP_DEFAULT_TX_WINDOW);
3307 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3308 	}
3309 	chan->ack_win = chan->tx_win;
3310 }
3311 
/* Pick an input MTU matching the largest usable ACL packet size.
 * NOTE(review): the tests are inverted because the EDR bits in
 * pkt_type appear to be "packet type shall NOT be used" flags —
 * assumes HCI EDR pkt_type semantics; confirm against hci.h.  The
 * checks run from smallest to largest so the last passing one wins.
 */
static void l2cap_mtu_auto(struct l2cap_chan *chan)
{
	struct hci_conn *conn = chan->conn->hcon;

	/* Fall back to the minimum MTU if no larger packet is usable */
	chan->imtu = L2CAP_DEFAULT_MIN_MTU;

	/* The 2-DH1 packet has between 2 and 56 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_2DH1))
		chan->imtu = 54;

	/* The 3-DH1 packet has between 2 and 85 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_3DH1))
		chan->imtu = 83;

	/* The 2-DH3 packet has between 2 and 369 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_2DH3))
		chan->imtu = 367;

	/* The 3-DH3 packet has between 2 and 554 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_3DH3))
		chan->imtu = 552;

	/* The 2-DH5 packet has between 2 and 681 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_2DH5))
		chan->imtu = 679;

	/* The 3-DH5 packet has between 2 and 1023 information bytes
	 * (including the 2-byte payload header)
	 */
	if (!(conn->pkt_type & HCI_3DH5))
		chan->imtu = 1021;
}
3354 
/* Build a Configuration Request for @chan into @data (at most @data_size
 * bytes) and return the number of bytes written.  On the first exchange
 * the channel mode is (re)selected from local policy and the remote
 * feature mask; later requests keep the previously chosen mode.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Only negotiate the mode on the very first config exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* A "state 2" device insists on its configured mode */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only send an MTU option when it differs from the default; a
	 * zero (unset) MTU is first auto-sized from the ACL packet types.
	 */
	if (chan->imtu != L2CAP_DEFAULT_MTU) {
		if (!chan->imtu)
			l2cap_mtu_auto(chan);
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
				   endptr - ptr);
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Only state basic mode explicitly when the remote could
		 * otherwise assume an enhanced mode.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* Advertise an MPS that fits our ACL MTU with worst-case
		 * (extended header + SDU length + FCS) overhead.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		/* The full window goes in a separate EWS option when the
		 * extended control field is in use.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3480 
/* Parse the accumulated Configuration Request options for @chan (stored
 * in chan->conf_req/conf_len) and build a Configuration Response into
 * @data (at most @data_size bytes).  Returns the response length, or a
 * negative errno when the connection must be refused outright.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *endptr = data + data_size;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: extract each option from the stored request.
	 * Options with an unexpected length are silently ignored.
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		if (len < 0)
			break;

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* Extended window sizes require local A2MP
			 * (high-speed) support; refuse otherwise.
			 */
			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
				return -ECONNREFUSED;
			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;
			/* Unknown non-hint option: echo its type back in
			 * the response body.
			 */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode is only (re)selected on the first exchange */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* A "state 2" device cannot change its configured mode */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Mode disagreement on the second round is fatal */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp remote MPS to what fits in our ACL MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);

			/* NOTE(review): this reads 'efs' whenever
			 * FLAG_EFS_ENABLE is set; efs is only populated
			 * above when remote_efs — confirm the flag cannot
			 * be set without a received EFS option.
			 */
			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
3705 
/* Parse a Configuration Response of @len bytes at @rsp and build a new
 * Configuration Request into @data (at most @size bytes) adopting the
 * values the remote proposed.  *@result may be downgraded to UNACCEPT.
 * Returns the request length, or a negative errno to refuse the
 * connection.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, size_t size, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	void *endptr = data + size;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	/* Walk the response options; each accepted value is both stored
	 * on the channel and echoed into the new request.  Options with
	 * an unexpected length are silently skipped.
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
					   endptr - ptr);
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
					   chan->flush_to, endptr - ptr);
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			/* A "state 2" device refuses any mode change */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;
			chan->fcs = 0;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			memcpy(&efs, (void *)val, olen);
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs, endptr - ptr);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* Basic mode cannot be upgraded by the remote's response */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			/* NOTE(review): efs is read when FLAG_EFS_ENABLE
			 * is set but only written if an EFS option was
			 * present above — confirm the flag implies an EFS
			 * option was received.
			 */
			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0);

	return ptr - data;
}
3823 
3824 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3825 				u16 result, u16 flags)
3826 {
3827 	struct l2cap_conf_rsp *rsp = data;
3828 	void *ptr = rsp->data;
3829 
3830 	BT_DBG("chan %p", chan);
3831 
3832 	rsp->scid   = cpu_to_le16(chan->dcid);
3833 	rsp->result = cpu_to_le16(result);
3834 	rsp->flags  = cpu_to_le16(flags);
3835 
3836 	return ptr - data;
3837 }
3838 
3839 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3840 {
3841 	struct l2cap_le_conn_rsp rsp;
3842 	struct l2cap_conn *conn = chan->conn;
3843 
3844 	BT_DBG("chan %p", chan);
3845 
3846 	rsp.dcid    = cpu_to_le16(chan->scid);
3847 	rsp.mtu     = cpu_to_le16(chan->imtu);
3848 	rsp.mps     = cpu_to_le16(chan->mps);
3849 	rsp.credits = cpu_to_le16(chan->rx_credits);
3850 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3851 
3852 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3853 		       &rsp);
3854 }
3855 
3856 void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
3857 {
3858 	struct {
3859 		struct l2cap_ecred_conn_rsp rsp;
3860 		__le16 dcid[5];
3861 	} __packed pdu;
3862 	struct l2cap_conn *conn = chan->conn;
3863 	u16 ident = chan->ident;
3864 	int i = 0;
3865 
3866 	if (!ident)
3867 		return;
3868 
3869 	BT_DBG("chan %p ident %d", chan, ident);
3870 
3871 	pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
3872 	pdu.rsp.mps     = cpu_to_le16(chan->mps);
3873 	pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
3874 	pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3875 
3876 	mutex_lock(&conn->chan_lock);
3877 
3878 	list_for_each_entry(chan, &conn->chan_l, list) {
3879 		if (chan->ident != ident)
3880 			continue;
3881 
3882 		/* Reset ident so only one response is sent */
3883 		chan->ident = 0;
3884 
3885 		/* Include all channels pending with the same ident */
3886 		pdu.dcid[i++] = cpu_to_le16(chan->scid);
3887 	}
3888 
3889 	mutex_unlock(&conn->chan_lock);
3890 
3891 	l2cap_send_cmd(conn, ident, L2CAP_ECRED_CONN_RSP,
3892 			sizeof(pdu.rsp) + i * sizeof(__le16), &pdu);
3893 }
3894 
3895 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3896 {
3897 	struct l2cap_conn_rsp rsp;
3898 	struct l2cap_conn *conn = chan->conn;
3899 	u8 buf[128];
3900 	u8 rsp_code;
3901 
3902 	rsp.scid   = cpu_to_le16(chan->dcid);
3903 	rsp.dcid   = cpu_to_le16(chan->scid);
3904 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3905 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3906 
3907 	if (chan->hs_hcon)
3908 		rsp_code = L2CAP_CREATE_CHAN_RSP;
3909 	else
3910 		rsp_code = L2CAP_CONN_RSP;
3911 
3912 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3913 
3914 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3915 
3916 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3917 		return;
3918 
3919 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3920 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3921 	chan->num_conf_req++;
3922 }
3923 
/* Extract the final RFC (and optional extended window) settings from a
 * successful Configuration Response at @rsp of @len bytes and apply them
 * to @chan.  Only meaningful for ERTM and streaming modes.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	/* Overwrite the defaults with whatever the response carries */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* The ack window comes from the EWS option when extended
		 * control is in use, else from the RFC txwin_size.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
	}
}
3979 
3980 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3981 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3982 				    u8 *data)
3983 {
3984 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3985 
3986 	if (cmd_len < sizeof(*rej))
3987 		return -EPROTO;
3988 
3989 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3990 		return 0;
3991 
3992 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3993 	    cmd->ident == conn->info_ident) {
3994 		cancel_delayed_work(&conn->info_timer);
3995 
3996 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3997 		conn->info_ident = 0;
3998 
3999 		l2cap_conn_start(conn);
4000 	}
4001 
4002 	return 0;
4003 }
4004 
/* Handle an incoming Connection Request (or AMP Create Channel Request,
 * per @rsp_code/@amp_id): look up a listening channel for the PSM,
 * validate security and the requested source CID, create and register
 * the new channel, and send the response.  Returns the new channel, or
 * NULL when the request was rejected.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	/* Decide whether to answer success, pending-auth or pending */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* First connection on this link: kick off the feature-mask
	 * Information Request exchange.
	 */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* On immediate success, start configuration right away */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	return chan;
}
4147 
4148 static int l2cap_connect_req(struct l2cap_conn *conn,
4149 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4150 {
4151 	struct hci_dev *hdev = conn->hcon->hdev;
4152 	struct hci_conn *hcon = conn->hcon;
4153 
4154 	if (cmd_len < sizeof(struct l2cap_conn_req))
4155 		return -EPROTO;
4156 
4157 	hci_dev_lock(hdev);
4158 	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
4159 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4160 		mgmt_device_connected(hdev, hcon, 0, NULL, 0);
4161 	hci_dev_unlock(hdev);
4162 
4163 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
4164 	return 0;
4165 }
4166 
/* Handle a Connection Response (or AMP Create Channel Response): find the
 * local channel by source CID (or by signalling ident when the remote did
 * not assign one), then either move into configuration, keep waiting, or
 * tear the channel down.  Returns 0 on success or a negative errno.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	/* A zero scid means the remote answered before assigning one;
	 * fall back to matching on the outstanding signalling ident.
	 */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Connection accepted: record the remote CID and start
		 * configuration (once).
		 */
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result is a refusal */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
4239 
4240 static inline void set_default_fcs(struct l2cap_chan *chan)
4241 {
4242 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4243 	 * sides request it.
4244 	 */
4245 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4246 		chan->fcs = L2CAP_FCS_NONE;
4247 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4248 		chan->fcs = L2CAP_FCS_CRC16;
4249 }
4250 
4251 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4252 				    u8 ident, u16 flags)
4253 {
4254 	struct l2cap_conn *conn = chan->conn;
4255 
4256 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4257 	       flags);
4258 
4259 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4260 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4261 
4262 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4263 		       l2cap_build_conf_rsp(chan, data,
4264 					    L2CAP_CONF_SUCCESS, flags), data);
4265 }
4266 
4267 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4268 				   u16 scid, u16 dcid)
4269 {
4270 	struct l2cap_cmd_rej_cid rej;
4271 
4272 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4273 	rej.scid = __cpu_to_le16(scid);
4274 	rej.dcid = __cpu_to_le16(dcid);
4275 
4276 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4277 }
4278 
/* Handle an incoming Configuration Request: accumulate continuation
 * fragments in chan->conf_req, and once the request is complete, parse
 * it, send the response, and — when both directions are configured —
 * bring the channel up.  Returns 0 or a negative errno.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only legal in these states */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
	    chan->state != BT_CONNECTED) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finalize FCS, set up ERTM state
	 * if needed, and mark the channel ready.
	 */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
4388 
/* Handle an incoming L2CAP Configure Response (BR/EDR signaling).
 *
 * Parses the response, drives the local configuration state machine and,
 * once both directions are configured, brings the channel up (or tears it
 * down on failure).  Returns 0, or -EPROTO on a malformed command.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);	/* length of the option data */
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns the channel locked; unlocked at "done" below */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		/* Remote accepted our options; latch the negotiated RFC */
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			if (!chan->hs_hcon) {
				/* BR/EDR channel: respond right away */
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				/* AMP channel: defer the response until the
				 * logical link is established
				 */
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		/* Remote rejected some options; renegotiate a bounded
		 * number of times before giving up
		 */
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fall through */

	default:
		/* Unrecoverable: fail the channel and disconnect */
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	/* More option fragments still to come */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: initialize the mode and go ready */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
4501 
/* Handle an incoming L2CAP Disconnection Request.
 *
 * Looks up the channel by its local CID (the peer's dcid), acknowledges
 * with a Disconnection Response and tears the channel down.  Returns 0,
 * or -EPROTO if the command is malformed.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* Lock order: conn->chan_lock first, then the channel lock */
	mutex_lock(&conn->chan_lock);

	/* The peer's dcid is our scid, hence the lookup by dcid */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		/* Unknown CID: send a Command Reject per the spec */
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	/* Hold a reference so the channel outlives l2cap_chan_del() below */
	l2cap_chan_hold(chan);
	l2cap_chan_lock(chan);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	l2cap_chan_del(chan, ECONNRESET);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4548 
/* Handle an incoming L2CAP Disconnection Response.
 *
 * Completes a disconnect we initiated: the channel must be in BT_DISCONN,
 * otherwise the response is ignored.  Returns 0, or -EPROTO if the
 * command is malformed.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* Lock order: conn->chan_lock first, then the channel lock */
	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	/* Hold a reference so the channel outlives l2cap_chan_del() below */
	l2cap_chan_hold(chan);
	l2cap_chan_lock(chan);

	/* Only act on a response to a disconnect we actually sent */
	if (chan->state != BT_DISCONN) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_del(chan, 0);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4594 
4595 static inline int l2cap_information_req(struct l2cap_conn *conn,
4596 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4597 					u8 *data)
4598 {
4599 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4600 	u16 type;
4601 
4602 	if (cmd_len != sizeof(*req))
4603 		return -EPROTO;
4604 
4605 	type = __le16_to_cpu(req->type);
4606 
4607 	BT_DBG("type 0x%4.4x", type);
4608 
4609 	if (type == L2CAP_IT_FEAT_MASK) {
4610 		u8 buf[8];
4611 		u32 feat_mask = l2cap_feat_mask;
4612 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4613 		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4614 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4615 		if (!disable_ertm)
4616 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4617 				| L2CAP_FEAT_FCS;
4618 		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4619 			feat_mask |= L2CAP_FEAT_EXT_FLOW
4620 				| L2CAP_FEAT_EXT_WINDOW;
4621 
4622 		put_unaligned_le32(feat_mask, rsp->data);
4623 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4624 			       buf);
4625 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4626 		u8 buf[12];
4627 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4628 
4629 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4630 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4631 		rsp->data[0] = conn->local_fixed_chan;
4632 		memset(rsp->data + 1, 0, 7);
4633 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4634 			       buf);
4635 	} else {
4636 		struct l2cap_info_rsp rsp;
4637 		rsp.type   = cpu_to_le16(type);
4638 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4639 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4640 			       &rsp);
4641 	}
4642 
4643 	return 0;
4644 }
4645 
/* Handle an incoming L2CAP Information Response.
 *
 * Completes the feature discovery handshake: a feature-mask response may
 * trigger a follow-up fixed-channel query; once discovery is done,
 * pending connections are started.  Returns 0, or -EPROTO on a malformed
 * command.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused; treat discovery as finished anyway */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Peer supports fixed channels: query which ones */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			/* Discovery complete; kick off pending channels */
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		/* First octet of the response carries the channel bitmap */
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4708 
/* Handle an incoming L2CAP Create Channel Request (AMP).
 *
 * For controller id 0 this degenerates to a normal BR/EDR connect; for an
 * AMP controller the id is validated and, on success, the new channel is
 * bound to the high-speed link.  Invalid controller ids are answered with
 * L2CAP_CR_BAD_AMP.  Returns 0, -EPROTO on a malformed command, or
 * -EINVAL when A2MP is not enabled locally.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		/* The AMP physical link to the same peer must already exist */
		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		/* FCS is not used on AMP links */
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4785 
4786 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4787 {
4788 	struct l2cap_move_chan_req req;
4789 	u8 ident;
4790 
4791 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4792 
4793 	ident = l2cap_get_ident(chan->conn);
4794 	chan->ident = ident;
4795 
4796 	req.icid = cpu_to_le16(chan->scid);
4797 	req.dest_amp_id = dest_amp_id;
4798 
4799 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4800 		       &req);
4801 
4802 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4803 }
4804 
4805 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4806 {
4807 	struct l2cap_move_chan_rsp rsp;
4808 
4809 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4810 
4811 	rsp.icid = cpu_to_le16(chan->dcid);
4812 	rsp.result = cpu_to_le16(result);
4813 
4814 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4815 		       sizeof(rsp), &rsp);
4816 }
4817 
4818 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4819 {
4820 	struct l2cap_move_chan_cfm cfm;
4821 
4822 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4823 
4824 	chan->ident = l2cap_get_ident(chan->conn);
4825 
4826 	cfm.icid = cpu_to_le16(chan->scid);
4827 	cfm.result = cpu_to_le16(result);
4828 
4829 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4830 		       sizeof(cfm), &cfm);
4831 
4832 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4833 }
4834 
4835 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4836 {
4837 	struct l2cap_move_chan_cfm cfm;
4838 
4839 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4840 
4841 	cfm.icid = cpu_to_le16(icid);
4842 	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4843 
4844 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4845 		       sizeof(cfm), &cfm);
4846 }
4847 
4848 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4849 					 u16 icid)
4850 {
4851 	struct l2cap_move_chan_cfm_rsp rsp;
4852 
4853 	BT_DBG("icid 0x%4.4x", icid);
4854 
4855 	rsp.icid = cpu_to_le16(icid);
4856 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4857 }
4858 
4859 static void __release_logical_link(struct l2cap_chan *chan)
4860 {
4861 	chan->hs_hchan = NULL;
4862 	chan->hs_hcon = NULL;
4863 
4864 	/* Placeholder - release the logical link */
4865 }
4866 
4867 static void l2cap_logical_fail(struct l2cap_chan *chan)
4868 {
4869 	/* Logical link setup failed */
4870 	if (chan->state != BT_CONNECTED) {
4871 		/* Create channel failure, disconnect */
4872 		l2cap_send_disconn_req(chan, ECONNRESET);
4873 		return;
4874 	}
4875 
4876 	switch (chan->move_role) {
4877 	case L2CAP_MOVE_ROLE_RESPONDER:
4878 		l2cap_move_done(chan);
4879 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4880 		break;
4881 	case L2CAP_MOVE_ROLE_INITIATOR:
4882 		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4883 		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4884 			/* Remote has only sent pending or
4885 			 * success responses, clean up
4886 			 */
4887 			l2cap_move_done(chan);
4888 		}
4889 
4890 		/* Other amp move states imply that the move
4891 		 * has already aborted
4892 		 */
4893 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4894 		break;
4895 	}
4896 }
4897 
/* Finish bringing up a channel created directly on an AMP controller
 * once its logical link is established.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;	/* scratch buffer filled by the send helper */

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* Send the config response that was deferred until the link came up */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
4920 
/* Advance the channel-move state machine after the AMP logical link for
 * an in-progress move has come up.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		/* Link is ready; what happens next depends on local
		 * busy state and which side initiated the move
		 */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4954 
4955 /* Call with chan locked */
4956 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4957 		       u8 status)
4958 {
4959 	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4960 
4961 	if (status) {
4962 		l2cap_logical_fail(chan);
4963 		__release_logical_link(chan);
4964 		return;
4965 	}
4966 
4967 	if (chan->state != BT_CONNECTED) {
4968 		/* Ignore logical link if channel is on BR/EDR */
4969 		if (chan->local_amp_id != AMP_ID_BREDR)
4970 			l2cap_logical_finish_create(chan, hchan);
4971 	} else {
4972 		l2cap_logical_finish_move(chan, hchan);
4973 	}
4974 }
4975 
4976 void l2cap_move_start(struct l2cap_chan *chan)
4977 {
4978 	BT_DBG("chan %p", chan);
4979 
4980 	if (chan->local_amp_id == AMP_ID_BREDR) {
4981 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4982 			return;
4983 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4984 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4985 		/* Placeholder - start physical link setup */
4986 	} else {
4987 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4988 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4989 		chan->move_id = 0;
4990 		l2cap_move_setup(chan);
4991 		l2cap_send_move_chan_req(chan, 0);
4992 	}
4993 }
4994 
/* Continue channel creation after the AMP physical link outcome is known.
 *
 * An outgoing channel either proceeds with a Create Channel Request on the
 * AMP or falls back to a plain BR/EDR connect; an incoming channel is
 * answered with a Create Channel Response and, on success, configuration
 * is started.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* FCS is not used on AMP links */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Accepted: move to CONFIG and start negotiating */
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}
	}
}
5046 
5047 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
5048 				   u8 remote_amp_id)
5049 {
5050 	l2cap_move_setup(chan);
5051 	chan->move_id = local_amp_id;
5052 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
5053 
5054 	l2cap_send_move_chan_req(chan, remote_amp_id);
5055 }
5056 
5057 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
5058 {
5059 	struct hci_chan *hchan = NULL;
5060 
5061 	/* Placeholder - get hci_chan for logical link */
5062 
5063 	if (hchan) {
5064 		if (hchan->state == BT_CONNECTED) {
5065 			/* Logical link is ready to go */
5066 			chan->hs_hcon = hchan->conn;
5067 			chan->hs_hcon->l2cap_data = chan->conn;
5068 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5069 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5070 
5071 			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5072 		} else {
5073 			/* Wait for logical link to be ready */
5074 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5075 		}
5076 	} else {
5077 		/* Logical link not available */
5078 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
5079 	}
5080 }
5081 
5082 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5083 {
5084 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5085 		u8 rsp_result;
5086 		if (result == -EINVAL)
5087 			rsp_result = L2CAP_MR_BAD_ID;
5088 		else
5089 			rsp_result = L2CAP_MR_NOT_ALLOWED;
5090 
5091 		l2cap_send_move_chan_rsp(chan, rsp_result);
5092 	}
5093 
5094 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
5095 	chan->move_state = L2CAP_MOVE_STABLE;
5096 
5097 	/* Restart data transmission */
5098 	l2cap_ertm_send(chan);
5099 }
5100 
5101 /* Invoke with locked chan */
5102 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
5103 {
5104 	u8 local_amp_id = chan->local_amp_id;
5105 	u8 remote_amp_id = chan->remote_amp_id;
5106 
5107 	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
5108 	       chan, result, local_amp_id, remote_amp_id);
5109 
5110 	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
5111 		return;
5112 
5113 	if (chan->state != BT_CONNECTED) {
5114 		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
5115 	} else if (result != L2CAP_MR_SUCCESS) {
5116 		l2cap_do_move_cancel(chan, result);
5117 	} else {
5118 		switch (chan->move_role) {
5119 		case L2CAP_MOVE_ROLE_INITIATOR:
5120 			l2cap_do_move_initiate(chan, local_amp_id,
5121 					       remote_amp_id);
5122 			break;
5123 		case L2CAP_MOVE_ROLE_RESPONDER:
5124 			l2cap_do_move_respond(chan, result);
5125 			break;
5126 		default:
5127 			l2cap_do_move_cancel(chan, result);
5128 			break;
5129 		}
5130 	}
5131 }
5132 
/* Handle an incoming L2CAP Move Channel Request.
 *
 * Validates the channel, its mode, the destination controller id and
 * move-collision state, then either rejects the move or takes the
 * responder role and answers (possibly with a pending result while the
 * logical/physical link is prepared).  Returns 0, -EPROTO on a malformed
 * command, or -EINVAL when A2MP is not enabled locally.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	/* Returns the channel locked; unlocked before returning below */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	/* Remember the ident so our response can be matched */
	chan->ident = cmd->ident;

	/* Only dynamic ERTM/streaming channels may be moved, and only if
	 * policy allows leaving BR/EDR
	 */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	/* A non-BR/EDR destination must name a usable AMP controller */
	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision.  Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
5229 
/* Advance an in-progress channel move after a success/pending Move
 * Channel Response, driving the per-channel move state machine.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	/* Returns the channel locked; unlocked at the end */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		/* No matching channel: confirm "unconfirmed" by icid only */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);
	/* A pending result restarts the (extended) move timer */
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
5319 
/* Handle a failed Move Channel Response.
 *
 * On a collision we switch to the responder role (the peer won); any
 * other failure cancels the move.  In all cases an "unconfirmed" move
 * confirmation is sent.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	/* Returns the channel locked; unlocked at the end */
	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
5348 
5349 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5350 				  struct l2cap_cmd_hdr *cmd,
5351 				  u16 cmd_len, void *data)
5352 {
5353 	struct l2cap_move_chan_rsp *rsp = data;
5354 	u16 icid, result;
5355 
5356 	if (cmd_len != sizeof(*rsp))
5357 		return -EPROTO;
5358 
5359 	icid = le16_to_cpu(rsp->icid);
5360 	result = le16_to_cpu(rsp->result);
5361 
5362 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5363 
5364 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5365 		l2cap_move_continue(conn, icid, result);
5366 	else
5367 		l2cap_move_fail(conn, cmd->ident, icid, result);
5368 
5369 	return 0;
5370 }
5371 
/* Handle an incoming L2CAP Move Channel Confirmation.
 *
 * Finalizes the move on the responder side (committing or reverting the
 * controller id) and always acknowledges with a confirmation response,
 * even for an unknown icid as the spec requires.  Returns 0 or -EPROTO
 * on a malformed command.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	/* Returns the channel locked; unlocked at the end */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			/* Commit the move; a move back to BR/EDR frees the
			 * AMP logical link
			 */
			chan->local_amp_id = chan->move_id;
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			/* Move was not confirmed; revert */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
5413 
/* Handle an incoming L2CAP Move Channel Confirmation Response.
 *
 * Completes the move on the initiator side: the new controller id is
 * committed and any now-unused AMP logical link is released.  Returns 0
 * or -EPROTO on a malformed command.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	/* Returns the channel locked; unlocked at the end */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		/* Moved back to BR/EDR: the AMP logical link is unused now */
		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
5448 
/* Handle an incoming LE Connection Parameter Update Request.
 *
 * Only valid when we are the LE central (master): the proposed parameters
 * are validated, a response is sent, and accepted parameters are applied
 * to the link and reported to the management interface.  Returns 0,
 * -EINVAL when we are not master, or -EPROTO on a malformed command.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	/* Only the master may apply connection parameter updates */
	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min		= __le16_to_cpu(req->min);
	max		= __le16_to_cpu(req->max);
	latency		= __le16_to_cpu(req->latency);
	to_multiplier	= __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	/* Range-check the proposed parameters before accepting them */
	err = hci_check_conn_params(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		/* Apply the update and let mgmt decide whether to store it */
		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);

	}

	return 0;
}
5498 
/* Handle an LE Credit Based Connection Response to a connect request we
 * previously sent (matched by cmd->ident).
 *
 * On success the channel is completed with the peer's DCID/MTU/MPS and
 * initial TX credits.  On a security-related refusal the security level
 * is raised and SMP is kicked so the request can be retried; any other
 * result tears the channel down.
 *
 * Returns 0 on success, -EPROTO on a malformed PDU, -EBADSLT when no
 * channel matches the response ident or the reported DCID collides.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid    = __le16_to_cpu(rsp->dcid);
	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	/* A successful response must report at least the spec-minimum
	 * MTU/MPS (23 octets) and a DCID inside the LE dynamic CID range.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					   dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	/* Find the channel that is still waiting on this request ident */
	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* Reject a DCID that is already bound to another channel */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		/* Adopt the peer's parameters and complete the channel */
		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		/* Escalate one level above the link's current security */
		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		/* Any other result code is a refusal */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5585 
/* Dispatch a single BR/EDR signaling command to its handler.
 *
 * Handlers whose failure should trigger a Command Reject propagate their
 * return value through err; the others are fire-and-forget.  An unknown
 * opcode returns -EINVAL so the caller sends L2CAP_REJ_NOT_UNDERSTOOD.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo simply mirrors the request payload back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	/* AMP channel-move signaling */
	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5665 
/* Handle an incoming LE Credit Based Connection Request.
 *
 * Looks up a listening channel for the requested PSM, validates security
 * and the peer's SCID, creates the new channel and answers with an LE
 * Connection Response.  With FLAG_DEFER_SETUP the response is deferred
 * to userspace and nothing is sent here.
 *
 * Locking: takes conn->chan_lock and the parent channel lock around
 * channel creation; both are released before the response is sent.
 *
 * Returns 0 (a protocol-level refusal is reported in the response
 * result field, not via the return value) or -EPROTO on bad length/MTU.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);
	psm  = req->psm;
	dcid = 0;
	credits = 0;

	/* 23 octets is the spec minimum for both LE MTU and MPS */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* The link must satisfy the listener's security level */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	/* Ask the listener (e.g. the socket layer) for a child channel */
	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	/* Seed LE flow control with the credits granted by the peer */
	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

	/* Deferred setup: userspace will answer later, send nothing now */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5791 
5792 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5793 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5794 				   u8 *data)
5795 {
5796 	struct l2cap_le_credits *pkt;
5797 	struct l2cap_chan *chan;
5798 	u16 cid, credits, max_credits;
5799 
5800 	if (cmd_len != sizeof(*pkt))
5801 		return -EPROTO;
5802 
5803 	pkt = (struct l2cap_le_credits *) data;
5804 	cid	= __le16_to_cpu(pkt->cid);
5805 	credits	= __le16_to_cpu(pkt->credits);
5806 
5807 	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5808 
5809 	chan = l2cap_get_chan_by_dcid(conn, cid);
5810 	if (!chan)
5811 		return -EBADSLT;
5812 
5813 	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5814 	if (credits > max_credits) {
5815 		BT_ERR("LE credits overflow");
5816 		l2cap_send_disconn_req(chan, ECONNRESET);
5817 		l2cap_chan_unlock(chan);
5818 
5819 		/* Return 0 so that we don't trigger an unnecessary
5820 		 * command reject packet.
5821 		 */
5822 		return 0;
5823 	}
5824 
5825 	chan->tx_credits += credits;
5826 
5827 	/* Resume sending */
5828 	l2cap_le_flowctl_send(chan);
5829 
5830 	if (chan->tx_credits)
5831 		chan->ops->resume(chan);
5832 
5833 	l2cap_chan_unlock(chan);
5834 
5835 	return 0;
5836 }
5837 
5838 static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
5839 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5840 				       u8 *data)
5841 {
5842 	struct l2cap_ecred_conn_req *req = (void *) data;
5843 	struct {
5844 		struct l2cap_ecred_conn_rsp rsp;
5845 		__le16 dcid[5];
5846 	} __packed pdu;
5847 	struct l2cap_chan *chan, *pchan;
5848 	u16 mtu, mps;
5849 	__le16 psm;
5850 	u8 result, len = 0;
5851 	int i, num_scid;
5852 	bool defer = false;
5853 
5854 	if (!enable_ecred)
5855 		return -EINVAL;
5856 
5857 	if (cmd_len < sizeof(*req) || cmd_len - sizeof(*req) % sizeof(u16)) {
5858 		result = L2CAP_CR_LE_INVALID_PARAMS;
5859 		goto response;
5860 	}
5861 
5862 	mtu  = __le16_to_cpu(req->mtu);
5863 	mps  = __le16_to_cpu(req->mps);
5864 
5865 	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
5866 		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
5867 		goto response;
5868 	}
5869 
5870 	psm  = req->psm;
5871 
5872 	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);
5873 
5874 	memset(&pdu, 0, sizeof(pdu));
5875 
5876 	/* Check if we have socket listening on psm */
5877 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5878 					 &conn->hcon->dst, LE_LINK);
5879 	if (!pchan) {
5880 		result = L2CAP_CR_LE_BAD_PSM;
5881 		goto response;
5882 	}
5883 
5884 	mutex_lock(&conn->chan_lock);
5885 	l2cap_chan_lock(pchan);
5886 
5887 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5888 				     SMP_ALLOW_STK)) {
5889 		result = L2CAP_CR_LE_AUTHENTICATION;
5890 		goto unlock;
5891 	}
5892 
5893 	result = L2CAP_CR_LE_SUCCESS;
5894 	cmd_len -= sizeof(req);
5895 	num_scid = cmd_len / sizeof(u16);
5896 
5897 	for (i = 0; i < num_scid; i++) {
5898 		u16 scid = __le16_to_cpu(req->scid[i]);
5899 
5900 		BT_DBG("scid[%d] 0x%4.4x", i, scid);
5901 
5902 		pdu.dcid[i] = 0x0000;
5903 		len += sizeof(*pdu.dcid);
5904 
5905 		/* Check for valid dynamic CID range */
5906 		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5907 			result = L2CAP_CR_LE_INVALID_SCID;
5908 			continue;
5909 		}
5910 
5911 		/* Check if we already have channel with that dcid */
5912 		if (__l2cap_get_chan_by_dcid(conn, scid)) {
5913 			result = L2CAP_CR_LE_SCID_IN_USE;
5914 			continue;
5915 		}
5916 
5917 		chan = pchan->ops->new_connection(pchan);
5918 		if (!chan) {
5919 			result = L2CAP_CR_LE_NO_MEM;
5920 			continue;
5921 		}
5922 
5923 		bacpy(&chan->src, &conn->hcon->src);
5924 		bacpy(&chan->dst, &conn->hcon->dst);
5925 		chan->src_type = bdaddr_src_type(conn->hcon);
5926 		chan->dst_type = bdaddr_dst_type(conn->hcon);
5927 		chan->psm  = psm;
5928 		chan->dcid = scid;
5929 		chan->omtu = mtu;
5930 		chan->remote_mps = mps;
5931 
5932 		__l2cap_chan_add(conn, chan);
5933 
5934 		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));
5935 
5936 		/* Init response */
5937 		if (!pdu.rsp.credits) {
5938 			pdu.rsp.mtu = cpu_to_le16(chan->imtu);
5939 			pdu.rsp.mps = cpu_to_le16(chan->mps);
5940 			pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
5941 		}
5942 
5943 		pdu.dcid[i] = cpu_to_le16(chan->scid);
5944 
5945 		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5946 
5947 		chan->ident = cmd->ident;
5948 
5949 		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5950 			l2cap_state_change(chan, BT_CONNECT2);
5951 			defer = true;
5952 			chan->ops->defer(chan);
5953 		} else {
5954 			l2cap_chan_ready(chan);
5955 		}
5956 	}
5957 
5958 unlock:
5959 	l2cap_chan_unlock(pchan);
5960 	mutex_unlock(&conn->chan_lock);
5961 	l2cap_chan_put(pchan);
5962 
5963 response:
5964 	pdu.rsp.result = cpu_to_le16(result);
5965 
5966 	if (defer)
5967 		return 0;
5968 
5969 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
5970 		       sizeof(pdu.rsp) + len, &pdu);
5971 
5972 	return 0;
5973 }
5974 
/* Handle an Enhanced Credit Based Connection Response.
 *
 * Walks every channel still pending on this request ident (in EXT_FLOWCTL
 * mode and not yet connected) and consumes one DCID from the response per
 * channel, completing or tearing down each channel according to the
 * shared result code.
 *
 * NOTE(review): l2cap_chan_del() unlinks channels from conn->chan_l while
 * this plain list_for_each_entry is iterating it; this looks like it
 * relies on channel refcounting keeping the node readable — confirm.
 *
 * Returns 0 on success, -EPROTO when the PDU is shorter than its fixed
 * header.
 */
static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err = 0, sec_level;
	int i = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
	       result);

	mutex_lock(&conn->chan_lock);

	/* cmd_len now counts only the trailing DCID list */
	cmd_len -= sizeof(*rsp);

	list_for_each_entry(chan, &conn->chan_l, list) {
		u16 dcid;

		/* Only channels still pending on this request */
		if (chan->ident != cmd->ident ||
		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
		    chan->state == BT_CONNECTED)
			continue;

		l2cap_chan_lock(chan);

		/* Check that there is a dcid for each pending channel */
		if (cmd_len < sizeof(dcid)) {
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			continue;
		}

		dcid = __le16_to_cpu(rsp->dcid[i++]);
		cmd_len -= sizeof(u16);

		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);

		/* Check if dcid is already in use */
		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
			/* If a device receives a
			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
			 * already-assigned Destination CID, then both the
			 * original channel and the new channel shall be
			 * immediately discarded and not used.
			 */
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			chan = __l2cap_get_chan_by_dcid(conn, dcid);
			l2cap_chan_lock(chan);
			l2cap_chan_del(chan, ECONNRESET);
			l2cap_chan_unlock(chan);
			continue;
		}

		switch (result) {
		case L2CAP_CR_LE_AUTHENTICATION:
		case L2CAP_CR_LE_ENCRYPTION:
			/* If we already have MITM protection we can't do
			 * anything.
			 */
			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			/* Raise security one level and retry via SMP */
			sec_level = hcon->sec_level + 1;
			if (chan->sec_level < sec_level)
				chan->sec_level = sec_level;

			/* We'll need to send a new Connect Request */
			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);

			smp_conn_security(hcon, chan->sec_level);
			break;

		case L2CAP_CR_LE_BAD_PSM:
			l2cap_chan_del(chan, ECONNREFUSED);
			break;

		default:
			/* If dcid was not set it means channels was refused */
			if (!dcid) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			/* Adopt the peer's parameters and go connected */
			chan->ident = 0;
			chan->dcid = dcid;
			chan->omtu = mtu;
			chan->remote_mps = mps;
			chan->tx_credits = credits;
			l2cap_chan_ready(chan);
			break;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return err;
}
6088 
6089 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
6090 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6091 					 u8 *data)
6092 {
6093 	struct l2cap_ecred_reconf_req *req = (void *) data;
6094 	struct l2cap_ecred_reconf_rsp rsp;
6095 	u16 mtu, mps, result;
6096 	struct l2cap_chan *chan;
6097 	int i, num_scid;
6098 
6099 	if (!enable_ecred)
6100 		return -EINVAL;
6101 
6102 	if (cmd_len < sizeof(*req) || cmd_len - sizeof(*req) % sizeof(u16)) {
6103 		result = L2CAP_CR_LE_INVALID_PARAMS;
6104 		goto respond;
6105 	}
6106 
6107 	mtu = __le16_to_cpu(req->mtu);
6108 	mps = __le16_to_cpu(req->mps);
6109 
6110 	BT_DBG("mtu %u mps %u", mtu, mps);
6111 
6112 	if (mtu < L2CAP_ECRED_MIN_MTU) {
6113 		result = L2CAP_RECONF_INVALID_MTU;
6114 		goto respond;
6115 	}
6116 
6117 	if (mps < L2CAP_ECRED_MIN_MPS) {
6118 		result = L2CAP_RECONF_INVALID_MPS;
6119 		goto respond;
6120 	}
6121 
6122 	cmd_len -= sizeof(*req);
6123 	num_scid = cmd_len / sizeof(u16);
6124 	result = L2CAP_RECONF_SUCCESS;
6125 
6126 	for (i = 0; i < num_scid; i++) {
6127 		u16 scid;
6128 
6129 		scid = __le16_to_cpu(req->scid[i]);
6130 		if (!scid)
6131 			return -EPROTO;
6132 
6133 		chan = __l2cap_get_chan_by_dcid(conn, scid);
6134 		if (!chan)
6135 			continue;
6136 
6137 		/* If the MTU value is decreased for any of the included
6138 		 * channels, then the receiver shall disconnect all
6139 		 * included channels.
6140 		 */
6141 		if (chan->omtu > mtu) {
6142 			BT_ERR("chan %p decreased MTU %u -> %u", chan,
6143 			       chan->omtu, mtu);
6144 			result = L2CAP_RECONF_INVALID_MTU;
6145 		}
6146 
6147 		chan->omtu = mtu;
6148 		chan->remote_mps = mps;
6149 	}
6150 
6151 respond:
6152 	rsp.result = cpu_to_le16(result);
6153 
6154 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
6155 		       &rsp);
6156 
6157 	return 0;
6158 }
6159 
6160 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
6161 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6162 					 u8 *data)
6163 {
6164 	struct l2cap_chan *chan;
6165 	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6166 	u16 result;
6167 
6168 	if (cmd_len < sizeof(*rsp))
6169 		return -EPROTO;
6170 
6171 	result = __le16_to_cpu(rsp->result);
6172 
6173 	BT_DBG("result 0x%4.4x", rsp->result);
6174 
6175 	if (!result)
6176 		return 0;
6177 
6178 	list_for_each_entry(chan, &conn->chan_l, list) {
6179 		if (chan->ident != cmd->ident)
6180 			continue;
6181 
6182 		l2cap_chan_del(chan, ECONNRESET);
6183 	}
6184 
6185 	return 0;
6186 }
6187 
6188 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
6189 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6190 				       u8 *data)
6191 {
6192 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
6193 	struct l2cap_chan *chan;
6194 
6195 	if (cmd_len < sizeof(*rej))
6196 		return -EPROTO;
6197 
6198 	mutex_lock(&conn->chan_lock);
6199 
6200 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
6201 	if (!chan)
6202 		goto done;
6203 
6204 	l2cap_chan_lock(chan);
6205 	l2cap_chan_del(chan, ECONNREFUSED);
6206 	l2cap_chan_unlock(chan);
6207 
6208 done:
6209 	mutex_unlock(&conn->chan_lock);
6210 	return 0;
6211 }
6212 
/* Dispatch a single LE signaling command to its handler.
 *
 * Handlers whose failure should trigger a Command Reject propagate their
 * return value through err; the others are fire-and-forget.  An unknown
 * opcode returns -EINVAL so the caller sends L2CAP_REJ_NOT_UNDERSTOOD.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	/* Enhanced Credit Based (ECRED) signaling */
	case L2CAP_ECRED_CONN_REQ:
		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_RSP:
		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_REQ:
		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_RSP:
		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
6275 
/* Process an skb received on the LE signaling channel.
 *
 * An LE signaling PDU carries exactly one command, so the whole skb
 * (after the command header) must match the advertised length.  On a
 * handler error a Command Reject is sent back.  The skb is always
 * consumed.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* Exactly one command per LE PDU, and ident 0 is reserved */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		/* err is the handler's status (e.g. -EINVAL for an
		 * unknown opcode); the message text is historical.
		 */
		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
6316 
/* Process an skb received on the BR/EDR signaling channel.
 *
 * Unlike LE, a single BR/EDR signaling PDU may contain several commands
 * back to back, so loop until the skb is exhausted or a corrupted
 * command is found.  On a handler error a Command Reject is sent back.
 * The skb is always consumed.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	int err;

	/* Give raw sockets a copy of the signaling traffic first */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
		u16 len;

		cmd = (void *) skb->data;
		skb_pull(skb, L2CAP_CMD_HDR_SIZE);

		len = le16_to_cpu(cmd->len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
		       cmd->ident);

		/* The command must fit in what's left; ident 0 is reserved */
		if (len > skb->len || !cmd->ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			/* err is the handler's status (e.g. -EINVAL for an
			 * unknown opcode); the message text is historical.
			 */
			BT_ERR("Wrong link type (%d)", err);

			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		/* Advance to the next command in the same PDU */
		skb_pull(skb, len);
	}

drop:
	kfree_skb(skb);
}
6362 
6363 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
6364 {
6365 	u16 our_fcs, rcv_fcs;
6366 	int hdr_size;
6367 
6368 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
6369 		hdr_size = L2CAP_EXT_HDR_SIZE;
6370 	else
6371 		hdr_size = L2CAP_ENH_HDR_SIZE;
6372 
6373 	if (chan->fcs == L2CAP_FCS_CRC16) {
6374 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
6375 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
6376 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
6377 
6378 		if (our_fcs != rcv_fcs)
6379 			return -EBADMSG;
6380 	}
6381 	return 0;
6382 }
6383 
/* Answer a poll (P-bit) from the peer with an F-bit response.
 *
 * The F-bit must go out exactly once: on an RNR if we are locally busy,
 * otherwise piggy-backed on a pending I-frame if any gets sent, and as a
 * last resort on an explicit RR.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	/* Mark that an F-bit is owed; cleared by whichever frame sends it */
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Locally busy: report it via RNR (carries the F-bit) */
	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Peer just cleared its busy condition: restart the retransmission
	 * timer if frames are still awaiting acknowledgement.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
6417 
6418 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
6419 			    struct sk_buff **last_frag)
6420 {
6421 	/* skb->len reflects data in skb as well as all fragments
6422 	 * skb->data_len reflects only data in fragments
6423 	 */
6424 	if (!skb_has_frag_list(skb))
6425 		skb_shinfo(skb)->frag_list = new_frag;
6426 
6427 	new_frag->next = NULL;
6428 
6429 	(*last_frag)->next = new_frag;
6430 	*last_frag = new_frag;
6431 
6432 	skb->len += new_frag->len;
6433 	skb->data_len += new_frag->len;
6434 	skb->truesize += new_frag->truesize;
6435 }
6436 
/* Feed one ERTM I-frame into SDU reassembly according to its SAR bits.
 *
 * Ownership: on every successful path the skb is either handed to
 * chan->ops->recv() or absorbed into chan->sdu (the local skb pointer is
 * set to NULL to record that).  On error both the frame and any partial
 * SDU are freed and the reassembly state is reset.
 *
 * Returns 0 on success, -EINVAL on a SAR sequence violation, -EMSGSIZE
 * when the announced SDU exceeds the channel MTU, or the recv callback's
 * error.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	/* Default: any unhandled break below is a protocol error */
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* An unsegmented frame may not interrupt a reassembly */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		/* A new SDU may not start while one is in progress */
		if (chan->sdu)
			break;

		/* The start frame must carry the 16-bit SDU length */
		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start frame already holding the whole SDU is a
		 * protocol error (err stays -EINVAL).
		 */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb now owned by chan->sdu */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		/* Continuation without a start frame is a protocol error */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Overrunning the announced length is a protocol error */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		/* End without a start frame is a protocol error */
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The end frame must complete the SDU exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* skb is NULL here when it was absorbed above */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
6521 
/* Re-segment queued frames after an MPS change.
 *
 * Placeholder: re-segmentation is not implemented; always reports
 * success.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
6527 
6528 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6529 {
6530 	u8 event;
6531 
6532 	if (chan->mode != L2CAP_MODE_ERTM)
6533 		return;
6534 
6535 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6536 	l2cap_tx(chan, NULL, NULL, event);
6537 }
6538 
/* Drain in-sequence frames from the SREJ queue after a retransmission
 * filled a gap.  Stops at the first missing sequence number, on a
 * reassembly error, or when the channel goes locally busy.  Once the
 * queue is empty the receiver returns to the normal RECV state and acks.
 *
 * Returns 0 or the error from l2cap_reassemble_sdu().
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		/* Look for the next expected sequence number in the queue */
		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	/* All gaps filled: leave the SREJ_SENT state and acknowledge */
	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
6572 
/* Handle a received SREJ S-frame: the peer asks for retransmission of
 * the single I-frame with sequence number control->reqseq.
 *
 * Disconnects on an invalid reqseq or when the frame has already been
 * retransmitted max_tx times.  The P/F bit handling mirrors the ERTM
 * spec: a poll forces an F-bit reply, and CONN_SREJ_ACT guards against
 * retransmitting the same frame twice for one SREJ exchange.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq names a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* Poll: acknowledge, retransmit, and owe an F-bit reply */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		/* Remember the reqseq so a duplicate SREJ (with F-bit)
		 * does not trigger a second retransmission.
		 */
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Only retransmit if this final SREJ is not the
			 * answer to the exchange already handled above.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
6630 
/* Handle a received REJ S-frame: the peer asks for retransmission of
 * every I-frame starting at control->reqseq.
 *
 * Disconnects on an invalid reqseq or when the first rejected frame has
 * already been retransmitted max_tx times.  CONN_REJ_ACT guards against
 * retransmitting the same window twice for one REJ exchange.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq names a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Only retransmit if this final REJ is not the answer to
		 * an exchange already handled.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
6667 
/* Classify an incoming I-frame sequence number relative to the current
 * receive state.  The result drives the RX state machines: txseq may be
 * the expected frame, a requested (SREJ'd) frame, a duplicate, a gap
 * indicating missing frames, or invalid (outside the tx window).
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	/* While SREJs are outstanding, also classify against the SREJ
	 * bookkeeping (srej_list of requested seqs, srej_q of stored skbs).
	 */
	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq earlier than expected_tx_seq (both measured from the last
	 * acked seq) was already received.
	 */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6753 
6754 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6755 			       struct l2cap_ctrl *control,
6756 			       struct sk_buff *skb, u8 event)
6757 {
6758 	int err = 0;
6759 	bool skb_in_use = false;
6760 
6761 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6762 	       event);
6763 
6764 	switch (event) {
6765 	case L2CAP_EV_RECV_IFRAME:
6766 		switch (l2cap_classify_txseq(chan, control->txseq)) {
6767 		case L2CAP_TXSEQ_EXPECTED:
6768 			l2cap_pass_to_tx(chan, control);
6769 
6770 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6771 				BT_DBG("Busy, discarding expected seq %d",
6772 				       control->txseq);
6773 				break;
6774 			}
6775 
6776 			chan->expected_tx_seq = __next_seq(chan,
6777 							   control->txseq);
6778 
6779 			chan->buffer_seq = chan->expected_tx_seq;
6780 			skb_in_use = true;
6781 
6782 			err = l2cap_reassemble_sdu(chan, skb, control);
6783 			if (err)
6784 				break;
6785 
6786 			if (control->final) {
6787 				if (!test_and_clear_bit(CONN_REJ_ACT,
6788 							&chan->conn_state)) {
6789 					control->final = 0;
6790 					l2cap_retransmit_all(chan, control);
6791 					l2cap_ertm_send(chan);
6792 				}
6793 			}
6794 
6795 			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6796 				l2cap_send_ack(chan);
6797 			break;
6798 		case L2CAP_TXSEQ_UNEXPECTED:
6799 			l2cap_pass_to_tx(chan, control);
6800 
6801 			/* Can't issue SREJ frames in the local busy state.
6802 			 * Drop this frame, it will be seen as missing
6803 			 * when local busy is exited.
6804 			 */
6805 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6806 				BT_DBG("Busy, discarding unexpected seq %d",
6807 				       control->txseq);
6808 				break;
6809 			}
6810 
6811 			/* There was a gap in the sequence, so an SREJ
6812 			 * must be sent for each missing frame.  The
6813 			 * current frame is stored for later use.
6814 			 */
6815 			skb_queue_tail(&chan->srej_q, skb);
6816 			skb_in_use = true;
6817 			BT_DBG("Queued %p (queue len %d)", skb,
6818 			       skb_queue_len(&chan->srej_q));
6819 
6820 			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6821 			l2cap_seq_list_clear(&chan->srej_list);
6822 			l2cap_send_srej(chan, control->txseq);
6823 
6824 			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6825 			break;
6826 		case L2CAP_TXSEQ_DUPLICATE:
6827 			l2cap_pass_to_tx(chan, control);
6828 			break;
6829 		case L2CAP_TXSEQ_INVALID_IGNORE:
6830 			break;
6831 		case L2CAP_TXSEQ_INVALID:
6832 		default:
6833 			l2cap_send_disconn_req(chan, ECONNRESET);
6834 			break;
6835 		}
6836 		break;
6837 	case L2CAP_EV_RECV_RR:
6838 		l2cap_pass_to_tx(chan, control);
6839 		if (control->final) {
6840 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6841 
6842 			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6843 			    !__chan_is_moving(chan)) {
6844 				control->final = 0;
6845 				l2cap_retransmit_all(chan, control);
6846 			}
6847 
6848 			l2cap_ertm_send(chan);
6849 		} else if (control->poll) {
6850 			l2cap_send_i_or_rr_or_rnr(chan);
6851 		} else {
6852 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6853 					       &chan->conn_state) &&
6854 			    chan->unacked_frames)
6855 				__set_retrans_timer(chan);
6856 
6857 			l2cap_ertm_send(chan);
6858 		}
6859 		break;
6860 	case L2CAP_EV_RECV_RNR:
6861 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6862 		l2cap_pass_to_tx(chan, control);
6863 		if (control && control->poll) {
6864 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
6865 			l2cap_send_rr_or_rnr(chan, 0);
6866 		}
6867 		__clear_retrans_timer(chan);
6868 		l2cap_seq_list_clear(&chan->retrans_list);
6869 		break;
6870 	case L2CAP_EV_RECV_REJ:
6871 		l2cap_handle_rej(chan, control);
6872 		break;
6873 	case L2CAP_EV_RECV_SREJ:
6874 		l2cap_handle_srej(chan, control);
6875 		break;
6876 	default:
6877 		break;
6878 	}
6879 
6880 	if (skb && !skb_in_use) {
6881 		BT_DBG("Freeing %p", skb);
6882 		kfree_skb(skb);
6883 	}
6884 
6885 	return err;
6886 }
6887 
/* RX handler for the SREJ_SENT state: one or more SREJ requests are
 * outstanding.  Incoming I-frames are buffered on srej_q until the
 * missing frames arrive and the stored sequence can be reassembled.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The frame at the head of the SREJ list has
			 * arrived; queue it and deliver any frames from
			 * srej_q that are now contiguous.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll with an SREJ for the tail of
			 * the list, carrying the F bit.
			 */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			/* Not a poll: acknowledge with a plain RR */
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	/* Free any frame that was not queued on srej_q */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
7030 
7031 static int l2cap_finish_move(struct l2cap_chan *chan)
7032 {
7033 	BT_DBG("chan %p", chan);
7034 
7035 	chan->rx_state = L2CAP_RX_STATE_RECV;
7036 
7037 	if (chan->hs_hcon)
7038 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7039 	else
7040 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7041 
7042 	return l2cap_resegment(chan);
7043 }
7044 
/* RX handler for the WAIT_P state: nothing is processed until the peer
 * sends a frame with the P (poll) bit set, which resynchronizes both
 * sides' sequence state after a channel move.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	/* A frame without the P bit is a protocol error in this state */
	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	/* Answer the poll with an F-bit frame */
	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* Only S-frame events can be forwarded to the RECV handler here;
	 * an I-frame while waiting for a poll is a protocol error.
	 */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
7082 
/* RX handler for the WAIT_F state (counterpart of WAIT_P): wait for the
 * peer's F-bit response, resync the transmit queue to the acknowledged
 * point, switch the MTU to the new link, and resume normal reception.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	/* A frame without the F bit is a protocol error in this state */
	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* Use the block MTU of the high-speed link if one is in use */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
7120 
7121 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
7122 {
7123 	/* Make sure reqseq is for a packet that has been sent but not acked */
7124 	u16 unacked;
7125 
7126 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
7127 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
7128 }
7129 
7130 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7131 		    struct sk_buff *skb, u8 event)
7132 {
7133 	int err = 0;
7134 
7135 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
7136 	       control, skb, event, chan->rx_state);
7137 
7138 	if (__valid_reqseq(chan, control->reqseq)) {
7139 		switch (chan->rx_state) {
7140 		case L2CAP_RX_STATE_RECV:
7141 			err = l2cap_rx_state_recv(chan, control, skb, event);
7142 			break;
7143 		case L2CAP_RX_STATE_SREJ_SENT:
7144 			err = l2cap_rx_state_srej_sent(chan, control, skb,
7145 						       event);
7146 			break;
7147 		case L2CAP_RX_STATE_WAIT_P:
7148 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
7149 			break;
7150 		case L2CAP_RX_STATE_WAIT_F:
7151 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
7152 			break;
7153 		default:
7154 			/* shut it down */
7155 			break;
7156 		}
7157 	} else {
7158 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
7159 		       control->reqseq, chan->next_tx_seq,
7160 		       chan->expected_ack_seq);
7161 		l2cap_send_disconn_req(chan, ECONNRESET);
7162 	}
7163 
7164 	return err;
7165 }
7166 
/* Streaming mode receive: there are no retransmissions, so anything but
 * the expected sequence number aborts the partially assembled SDU and
 * drops the frame, then reception simply continues from txseq.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Sequence gap or duplicate: throw away any partial SDU
		 * and the offending frame.
		 */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Every received frame advances the expected sequence, even when
	 * the frame itself was discarded.
	 */
	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return 0;
}
7202 
/* Entry point for ERTM/streaming data frames: checks the FCS and the
 * payload length, then dispatches I-frames and S-frames to the RX state
 * machine.  Always consumes @skb (directly or via the handlers) and
 * returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Strip SDU-length and FCS overhead from len before the MPS check.
	 * NOTE(review): len is u16, so a truncated frame makes these
	 * subtractions wrap; the wrapped value is expected to fail the
	 * len > chan->mps test below - confirm mps is always well below
	 * U16_MAX.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	/* Let an attached socket filter veto the frame */
	if ((chan->mode == L2CAP_MODE_ERTM ||
	     chan->mode == L2CAP_MODE_STREAMING) && sk_filter(chan->data, skb))
		goto drop;

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Maps control->super (RR/REJ/RNR/SREJ) to an RX event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
7294 
7295 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
7296 {
7297 	struct l2cap_conn *conn = chan->conn;
7298 	struct l2cap_le_credits pkt;
7299 	u16 return_credits;
7300 
7301 	return_credits = (chan->imtu / chan->mps) + 1;
7302 
7303 	if (chan->rx_credits >= return_credits)
7304 		return;
7305 
7306 	return_credits -= chan->rx_credits;
7307 
7308 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
7309 
7310 	chan->rx_credits += return_credits;
7311 
7312 	pkt.cid     = cpu_to_le16(chan->scid);
7313 	pkt.credits = cpu_to_le16(return_credits);
7314 
7315 	chan->ident = l2cap_get_ident(conn);
7316 
7317 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
7318 }
7319 
7320 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
7321 {
7322 	int err;
7323 
7324 	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
7325 
7326 	/* Wait recv to confirm reception before updating the credits */
7327 	err = chan->ops->recv(chan, skb);
7328 
7329 	/* Update credits whenever an SDU is received */
7330 	l2cap_chan_le_send_credits(chan);
7331 
7332 	return err;
7333 }
7334 
/* Receive one PDU on an LE / enhanced credit based flow control channel.
 * Consumes one rx credit per PDU and reassembles multi-PDU SDUs through
 * chan->sdu.  Negative errors are only returned on the early paths where
 * the caller still owns @skb; once the skb has been taken over, errors
 * are handled internally and 0 is returned (see comment at the end).
 */
static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	/* Data with no credits outstanding is a flow-control violation */
	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	/* Update if remote had run out of credits, this should only happens
	 * if the remote is not using the entire MPS.
	 */
	if (!chan->rx_credits)
		l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First PDU of an SDU carries the total SDU length */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* SDU fits in a single PDU: deliver immediately */
		if (skb->len == sdu_len)
			return l2cap_ecred_recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);
		}

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	/* skb is absorbed into chan->sdu; clear it so the error path
	 * below does not free it twice.
	 */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = l2cap_ecred_recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
7438 
/* Dispatch an incoming data packet to the channel identified by @cid,
 * routing it to the per-mode receive path.  @skb is consumed on every
 * path, either here or inside the mode handlers.
 *
 * NOTE(review): l2cap_get_chan_by_scid returns the channel locked (it
 * is unlocked at "done"), but no explicit reference is taken here -
 * confirm the channel cannot be freed while this function runs.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		/* Data on the A2MP fixed channel may create the channel
		 * on demand; anything else unknown is dropped.
		 */
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procdure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		if (l2cap_ecred_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		/* Non-zero return means recv did not consume the skb;
		 * fall through to drop it ourselves.
		 */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
7513 
/* Deliver a connectionless (G-frame) packet to the channel bound to
 * @psm, if any.  @skb is either consumed by the channel's recv op or
 * freed here; the channel reference from the global lookup is dropped
 * on every exit path.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	/* Connectionless data is only defined for BR/EDR links */
	if (hcon->type != ACL_LINK)
		goto free_skb;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	/* Zero return means recv consumed the skb */
	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
7550 
/* Top-level L2CAP receive: parse the basic header and route the frame
 * to the signaling, connectionless, LE-signaling, or data channel
 * handlers.  Frames arriving before the HCI connection is fully up are
 * queued and replayed later by process_pending_rx().
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	/* lh was captured before the pull; the header bytes are still
	 * valid to read since skb_pull() only advances the data pointer.
	 */
	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* The declared length must match the remaining payload exactly */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
7605 
7606 static void process_pending_rx(struct work_struct *work)
7607 {
7608 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7609 					       pending_rx_work);
7610 	struct sk_buff *skb;
7611 
7612 	BT_DBG("");
7613 
7614 	while ((skb = skb_dequeue(&conn->pending_rx)))
7615 		l2cap_recv_frame(conn, skb);
7616 }
7617 
/* Find or create the L2CAP connection object for @hcon.  A newly
 * created conn takes a reference on @hcon and owns a dedicated HCI
 * channel; returns NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	/* Already set up by an earlier caller */
	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* Use the LE data MTU when the controller reports one, otherwise
	 * fall back to the ACL MTU.
	 */
	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	/* Advertise the A2MP fixed channel only when high speed is on */
	if (hcon->type == ACL_LINK &&
	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
		conn->local_fixed_chan |= L2CAP_FC_A2MP;

	/* Advertise SMP over BR/EDR when secure connections permit it */
	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
7684 
7685 static bool is_valid_psm(u16 psm, u8 dst_type) {
7686 	if (!psm)
7687 		return false;
7688 
7689 	if (bdaddr_type_is_le(dst_type))
7690 		return (psm <= 0x00ff);
7691 
7692 	/* PSM must be odd and lsb of upper byte must be 0 */
7693 	return ((psm & 0x0101) == 0x0001);
7694 }
7695 
/* Initiate an outgoing L2CAP connection on @chan to @dst.
 *
 * Validates the PSM/CID/mode combination for the channel type, resolves
 * a local controller, creates (or reuses) the underlying HCI connection
 * and attaches the channel to the L2CAP connection.  Returns 0 on
 * success or when a connection attempt is already in progress, or a
 * negative errno.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
	       dst, dst_type, __le16_to_cpu(psm), chan->mode);

	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* Raw channels may omit both PSM and CID; all others need a valid
	 * PSM or an explicit CID.
	 */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	/* Reject modes the local side does not (or is configured not to)
	 * support.
	 */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!enable_ecred) {
			err = -EOPNOTSUPP;
			goto done;
		}
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising, connect directly in the slave role;
		 * otherwise use the scan-based connect.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type,
					      chan->sec_level,
					      HCI_LE_CONN_TIMEOUT,
					      HCI_ROLE_SLAVE, NULL);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level,
						   HCI_LE_CONN_TIMEOUT);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* A fixed destination CID must not already be in use on conn */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	/* If the link is already up, start channel setup immediately */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->chan_lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
7862 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7863 
7864 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
7865 {
7866 	struct l2cap_conn *conn = chan->conn;
7867 	struct {
7868 		struct l2cap_ecred_reconf_req req;
7869 		__le16 scid;
7870 	} pdu;
7871 
7872 	pdu.req.mtu = cpu_to_le16(chan->imtu);
7873 	pdu.req.mps = cpu_to_le16(chan->mps);
7874 	pdu.scid    = cpu_to_le16(chan->scid);
7875 
7876 	chan->ident = l2cap_get_ident(conn);
7877 
7878 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
7879 		       sizeof(pdu), &pdu);
7880 }
7881 
7882 int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
7883 {
7884 	if (chan->imtu > mtu)
7885 		return -EINVAL;
7886 
7887 	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);
7888 
7889 	chan->imtu = mtu;
7890 
7891 	l2cap_ecred_reconfigure(chan);
7892 
7893 	return 0;
7894 }
7895 
7896 /* ---- L2CAP interface with lower layer (HCI) ---- */
7897 
7898 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7899 {
7900 	int exact = 0, lm1 = 0, lm2 = 0;
7901 	struct l2cap_chan *c;
7902 
7903 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7904 
7905 	/* Find listening sockets and check their link_mode */
7906 	read_lock(&chan_list_lock);
7907 	list_for_each_entry(c, &chan_list, global_l) {
7908 		if (c->state != BT_LISTEN)
7909 			continue;
7910 
7911 		if (!bacmp(&c->src, &hdev->bdaddr)) {
7912 			lm1 |= HCI_LM_ACCEPT;
7913 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7914 				lm1 |= HCI_LM_MASTER;
7915 			exact++;
7916 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
7917 			lm2 |= HCI_LM_ACCEPT;
7918 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7919 				lm2 |= HCI_LM_MASTER;
7920 		}
7921 	}
7922 	read_unlock(&chan_list_lock);
7923 
7924 	return exact ? lm1 : lm2;
7925 }
7926 
7927 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7928  * from an existing channel in the list or from the beginning of the
7929  * global list (by passing NULL as first parameter).
7930  */
7931 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
7932 						  struct hci_conn *hcon)
7933 {
7934 	u8 src_type = bdaddr_src_type(hcon);
7935 
7936 	read_lock(&chan_list_lock);
7937 
7938 	if (c)
7939 		c = list_next_entry(c, global_l);
7940 	else
7941 		c = list_entry(chan_list.next, typeof(*c), global_l);
7942 
7943 	list_for_each_entry_from(c, &chan_list, global_l) {
7944 		if (c->chan_type != L2CAP_CHAN_FIXED)
7945 			continue;
7946 		if (c->state != BT_LISTEN)
7947 			continue;
7948 		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
7949 			continue;
7950 		if (src_type != c->src_type)
7951 			continue;
7952 
7953 		l2cap_chan_hold(c);
7954 		read_unlock(&chan_list_lock);
7955 		return c;
7956 	}
7957 
7958 	read_unlock(&chan_list_lock);
7959 
7960 	return NULL;
7961 }
7962 
/* HCI callback: an ACL or LE link-level connection attempt completed.
 * On success, create the L2CAP connection and offer the new link to all
 * listening fixed channels.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	/* L2CAP only runs over ACL (BR/EDR) and LE links */
	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	/* Connection failed: tear down any L2CAP state tied to this link */
	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			/* Bind the child channel to this link's addresses */
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Look up the next listener before releasing the reference
		 * held on the current one.
		 */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
8023 
8024 int l2cap_disconn_ind(struct hci_conn *hcon)
8025 {
8026 	struct l2cap_conn *conn = hcon->l2cap_data;
8027 
8028 	BT_DBG("hcon %p", hcon);
8029 
8030 	if (!conn)
8031 		return HCI_ERROR_REMOTE_USER_TERM;
8032 	return conn->disc_reason;
8033 }
8034 
8035 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
8036 {
8037 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8038 		return;
8039 
8040 	BT_DBG("hcon %p reason %d", hcon, reason);
8041 
8042 	l2cap_conn_del(hcon, bt_to_errno(reason));
8043 }
8044 
8045 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
8046 {
8047 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
8048 		return;
8049 
8050 	if (encrypt == 0x00) {
8051 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
8052 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
8053 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
8054 			   chan->sec_level == BT_SECURITY_FIPS)
8055 			l2cap_chan_close(chan, ECONNREFUSED);
8056 	} else {
8057 		if (chan->sec_level == BT_SECURITY_MEDIUM)
8058 			__clear_chan_timer(chan);
8059 	}
8060 }
8061 
/* HCI callback: authentication/encryption state changed on the link.
 * Walk every channel on the connection and advance its state machine:
 * resume established channels, kick off pending outgoing connects, and
 * send deferred connect responses for incoming BR/EDR channels.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* Skip the A2MP fixed channel here */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Security procedure succeeded with encryption: adopt the
		 * link's security level for this channel.
		 */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Channel already up (or configuring): just resume data */
		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Outgoing connect that was waiting on security:
			 * proceed only if the encryption key size checks out.
			 */
			if (!status && l2cap_check_enc_key_size(hcon))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
			/* Incoming BR/EDR connect held for security: send
			 * the deferred connect response now.
			 */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Let the owner authorize first */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed or key too short: refuse
				 * and schedule disconnect.
				 */
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On success, start configuration immediately */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
8153 
/* Entry point for inbound ACL data from the HCI core. Reassembles ACL
 * fragments into complete L2CAP frames (using conn->rx_skb/rx_len as
 * reassembly state) and hands finished frames to l2cap_recv_frame(),
 * which takes ownership of the skb. All other paths free @skb here.
 */
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start frame while reassembly is in progress means the
		 * previous frame never completed: discard its state.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received; l2cap_recv_frame takes
			 * ownership of skb, so do not fall through to drop.
			 */
			l2cap_recv_frame(conn, skb);
			return;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		/* Remaining bytes expected via ACL_CONT fragments */
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

	/* The incoming fragment was either copied or rejected; free it */
drop:
	kfree_skb(skb);
}
8257 
/* Callbacks registered with the HCI core so L2CAP is notified of link
 * connect/disconnect completion and security changes.
 */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};
8264 
8265 static int l2cap_debugfs_show(struct seq_file *f, void *p)
8266 {
8267 	struct l2cap_chan *c;
8268 
8269 	read_lock(&chan_list_lock);
8270 
8271 	list_for_each_entry(c, &chan_list, global_l) {
8272 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
8273 			   &c->src, c->src_type, &c->dst, c->dst_type,
8274 			   c->state, __le16_to_cpu(c->psm),
8275 			   c->scid, c->dcid, c->imtu, c->omtu,
8276 			   c->sec_level, c->mode);
8277 	}
8278 
8279 	read_unlock(&chan_list_lock);
8280 
8281 	return 0;
8282 }
8283 
/* Generates l2cap_debugfs_fops wrapping l2cap_debugfs_show() */
DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);

/* debugfs entry created in l2cap_init(), removed in l2cap_exit() */
static struct dentry *l2cap_debugfs;
8287 
8288 int __init l2cap_init(void)
8289 {
8290 	int err;
8291 
8292 	err = l2cap_init_sockets();
8293 	if (err < 0)
8294 		return err;
8295 
8296 	hci_register_cb(&l2cap_cb);
8297 
8298 	if (IS_ERR_OR_NULL(bt_debugfs))
8299 		return 0;
8300 
8301 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
8302 					    NULL, &l2cap_debugfs_fops);
8303 
8304 	return 0;
8305 }
8306 
/* Module teardown: undo l2cap_init() in reverse order. */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
8313 
/* Runtime-tunable module parameters (mode 0644: root-writable via sysfs) */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_ecred, bool, 0644);
MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
8319