xref: /linux/net/bluetooth/l2cap_core.c (revision ba95c7452439756d4f6dceb5a188b7c31dbbe5b6)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 #include "a2mp.h"
43 #include "amp.h"
44 
45 #define LE_FLOWCTL_MAX_CREDITS 65535
46 
47 bool disable_ertm;
48 
49 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
50 
51 static LIST_HEAD(chan_list);
52 static DEFINE_RWLOCK(chan_list_lock);
53 
54 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
55 				       u8 code, u8 ident, u16 dlen, void *data);
56 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
57 			   void *data);
58 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
59 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
60 
61 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
62 		     struct sk_buff_head *skbs, u8 event);
63 
64 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
65 {
66 	if (link_type == LE_LINK) {
67 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
68 			return BDADDR_LE_PUBLIC;
69 		else
70 			return BDADDR_LE_RANDOM;
71 	}
72 
73 	return BDADDR_BREDR;
74 }
75 
76 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
77 {
78 	return bdaddr_type(hcon->type, hcon->src_type);
79 }
80 
81 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
82 {
83 	return bdaddr_type(hcon->type, hcon->dst_type);
84 }
85 
86 /* ---- L2CAP channels ---- */
87 
88 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
89 						   u16 cid)
90 {
91 	struct l2cap_chan *c;
92 
93 	list_for_each_entry(c, &conn->chan_l, list) {
94 		if (c->dcid == cid)
95 			return c;
96 	}
97 	return NULL;
98 }
99 
100 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
101 						   u16 cid)
102 {
103 	struct l2cap_chan *c;
104 
105 	list_for_each_entry(c, &conn->chan_l, list) {
106 		if (c->scid == cid)
107 			return c;
108 	}
109 	return NULL;
110 }
111 
112 /* Find channel with given SCID.
113  * Returns locked channel. */
114 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
115 						 u16 cid)
116 {
117 	struct l2cap_chan *c;
118 
119 	mutex_lock(&conn->chan_lock);
120 	c = __l2cap_get_chan_by_scid(conn, cid);
121 	if (c)
122 		l2cap_chan_lock(c);
123 	mutex_unlock(&conn->chan_lock);
124 
125 	return c;
126 }
127 
128 /* Find channel with given DCID.
129  * Returns locked channel.
130  */
131 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
132 						 u16 cid)
133 {
134 	struct l2cap_chan *c;
135 
136 	mutex_lock(&conn->chan_lock);
137 	c = __l2cap_get_chan_by_dcid(conn, cid);
138 	if (c)
139 		l2cap_chan_lock(c);
140 	mutex_unlock(&conn->chan_lock);
141 
142 	return c;
143 }
144 
145 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
146 						    u8 ident)
147 {
148 	struct l2cap_chan *c;
149 
150 	list_for_each_entry(c, &conn->chan_l, list) {
151 		if (c->ident == ident)
152 			return c;
153 	}
154 	return NULL;
155 }
156 
157 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
158 						  u8 ident)
159 {
160 	struct l2cap_chan *c;
161 
162 	mutex_lock(&conn->chan_lock);
163 	c = __l2cap_get_chan_by_ident(conn, ident);
164 	if (c)
165 		l2cap_chan_lock(c);
166 	mutex_unlock(&conn->chan_lock);
167 
168 	return c;
169 }
170 
171 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
172 {
173 	struct l2cap_chan *c;
174 
175 	list_for_each_entry(c, &chan_list, global_l) {
176 		if (c->sport == psm && !bacmp(&c->src, src))
177 			return c;
178 	}
179 	return NULL;
180 }
181 
182 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
183 {
184 	int err;
185 
186 	write_lock(&chan_list_lock);
187 
188 	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
189 		err = -EADDRINUSE;
190 		goto done;
191 	}
192 
193 	if (psm) {
194 		chan->psm = psm;
195 		chan->sport = psm;
196 		err = 0;
197 	} else {
198 		u16 p, start, end, incr;
199 
200 		if (chan->src_type == BDADDR_BREDR) {
201 			start = L2CAP_PSM_DYN_START;
202 			end = L2CAP_PSM_AUTO_END;
203 			incr = 2;
204 		} else {
205 			start = L2CAP_PSM_LE_DYN_START;
206 			end = L2CAP_PSM_LE_DYN_END;
207 			incr = 1;
208 		}
209 
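		/* Walk the dynamic range and claim the first PSM that is not
		 * already bound to this source address. On BR/EDR the search
		 * steps by 2 so the low octet of the allocated PSM stays odd,
		 * as the spec requires; on LE every value in the dynamic
		 * range is a candidate.
		 */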
210 		err = -EINVAL;
211 		for (p = start; p <= end; p += incr)
212 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
213 				chan->psm   = cpu_to_le16(p);
214 				chan->sport = cpu_to_le16(p);
215 				err = 0;
216 				break;
217 			}
218 	}
219 
220 done:
221 	write_unlock(&chan_list_lock);
222 	return err;
223 }
224 EXPORT_SYMBOL_GPL(l2cap_add_psm);
225 
226 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
227 {
228 	write_lock(&chan_list_lock);
229 
230 	/* Override the defaults (which are for conn-oriented) */
231 	chan->omtu = L2CAP_DEFAULT_MTU;
232 	chan->chan_type = L2CAP_CHAN_FIXED;
233 
234 	chan->scid = scid;
235 
236 	write_unlock(&chan_list_lock);
237 
238 	return 0;
239 }
240 
241 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
242 {
243 	u16 cid, dyn_end;
244 
245 	if (conn->hcon->type == LE_LINK)
246 		dyn_end = L2CAP_CID_LE_DYN_END;
247 	else
248 		dyn_end = L2CAP_CID_DYN_END;
249 
250 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
251 		if (!__l2cap_get_chan_by_scid(conn, cid))
252 			return cid;
253 	}
254 
255 	return 0;
256 }
257 
258 static void l2cap_state_change(struct l2cap_chan *chan, int state)
259 {
260 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
261 	       state_to_string(state));
262 
263 	chan->state = state;
264 	chan->ops->state_change(chan, state, 0);
265 }
266 
267 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
268 						int state, int err)
269 {
270 	chan->state = state;
271 	chan->ops->state_change(chan, chan->state, err);
272 }
273 
274 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
275 {
276 	chan->ops->state_change(chan, chan->state, err);
277 }
278 
279 static void __set_retrans_timer(struct l2cap_chan *chan)
280 {
281 	if (!delayed_work_pending(&chan->monitor_timer) &&
282 	    chan->retrans_timeout) {
283 		l2cap_set_timer(chan, &chan->retrans_timer,
284 				msecs_to_jiffies(chan->retrans_timeout));
285 	}
286 }
287 
288 static void __set_monitor_timer(struct l2cap_chan *chan)
289 {
290 	__clear_retrans_timer(chan);
291 	if (chan->monitor_timeout) {
292 		l2cap_set_timer(chan, &chan->monitor_timer,
293 				msecs_to_jiffies(chan->monitor_timeout));
294 	}
295 }
296 
297 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
298 					       u16 seq)
299 {
300 	struct sk_buff *skb;
301 
302 	skb_queue_walk(head, skb) {
303 		if (bt_cb(skb)->l2cap.txseq == seq)
304 			return skb;
305 	}
306 
307 	return NULL;
308 }
309 
310 /* ---- L2CAP sequence number lists ---- */
311 
312 /* For ERTM, ordered lists of sequence numbers must be tracked for
313  * SREJ requests that are received and for frames that are to be
314  * retransmitted. These seq_list functions implement a singly-linked
315  * list in an array, where membership in the list can also be checked
316  * in constant time. Items can also be added to the tail of the list
317  * and removed from the head in constant time, without further memory
318  * allocs or frees.
319  */
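/* As a short example of the encoding: after appending seq 5 and then
 * seq 9 to an empty list, head == 5, list[5 & mask] == 9,
 * list[9 & mask] == L2CAP_SEQ_LIST_TAIL and tail == 9. A pop then
 * returns 5 and advances head to 9; membership of any seq is just a
 * test of list[seq & mask] against L2CAP_SEQ_LIST_CLEAR.
 */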
320 
321 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
322 {
323 	size_t alloc_size, i;
324 
325 	/* Allocated size is a power of 2 to map sequence numbers
326 	 * (which may be up to 14 bits) into a smaller array that is
327 	 * sized for the negotiated ERTM transmit windows.
328 	 */
329 	alloc_size = roundup_pow_of_two(size);
330 
331 	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
332 	if (!seq_list->list)
333 		return -ENOMEM;
334 
335 	seq_list->mask = alloc_size - 1;
336 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
337 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
338 	for (i = 0; i < alloc_size; i++)
339 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
340 
341 	return 0;
342 }
343 
344 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
345 {
346 	kfree(seq_list->list);
347 }
348 
349 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
350 					   u16 seq)
351 {
352 	/* Constant-time check for list membership */
353 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
354 }
355 
356 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
357 {
358 	u16 seq = seq_list->head;
359 	u16 mask = seq_list->mask;
360 
361 	seq_list->head = seq_list->list[seq & mask];
362 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
363 
364 	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
365 		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
366 		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
367 	}
368 
369 	return seq;
370 }
371 
372 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
373 {
374 	u16 i;
375 
376 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
377 		return;
378 
379 	for (i = 0; i <= seq_list->mask; i++)
380 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
381 
382 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
383 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
384 }
385 
386 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
387 {
388 	u16 mask = seq_list->mask;
389 
390 	/* All appends happen in constant time */
391 
392 	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
393 		return;
394 
395 	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
396 		seq_list->head = seq;
397 	else
398 		seq_list->list[seq_list->tail & mask] = seq;
399 
400 	seq_list->tail = seq;
401 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
402 }
403 
404 static void l2cap_chan_timeout(struct work_struct *work)
405 {
406 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
407 					       chan_timer.work);
408 	struct l2cap_conn *conn = chan->conn;
409 	int reason;
410 
411 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
412 
413 	mutex_lock(&conn->chan_lock);
414 	l2cap_chan_lock(chan);
415 
416 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
417 		reason = ECONNREFUSED;
418 	else if (chan->state == BT_CONNECT &&
419 		 chan->sec_level != BT_SECURITY_SDP)
420 		reason = ECONNREFUSED;
421 	else
422 		reason = ETIMEDOUT;
423 
424 	l2cap_chan_close(chan, reason);
425 
426 	l2cap_chan_unlock(chan);
427 
428 	chan->ops->close(chan);
429 	mutex_unlock(&conn->chan_lock);
430 
431 	l2cap_chan_put(chan);
432 }
433 
434 struct l2cap_chan *l2cap_chan_create(void)
435 {
436 	struct l2cap_chan *chan;
437 
438 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
439 	if (!chan)
440 		return NULL;
441 
442 	mutex_init(&chan->lock);
443 
444 	/* Set default lock nesting level */
445 	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
446 
447 	write_lock(&chan_list_lock);
448 	list_add(&chan->global_l, &chan_list);
449 	write_unlock(&chan_list_lock);
450 
451 	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
452 
453 	chan->state = BT_OPEN;
454 
455 	kref_init(&chan->kref);
456 
457 	/* This flag is cleared in l2cap_chan_ready() */
458 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
459 
460 	BT_DBG("chan %p", chan);
461 
462 	return chan;
463 }
464 EXPORT_SYMBOL_GPL(l2cap_chan_create);
465 
466 static void l2cap_chan_destroy(struct kref *kref)
467 {
468 	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
469 
470 	BT_DBG("chan %p", chan);
471 
472 	write_lock(&chan_list_lock);
473 	list_del(&chan->global_l);
474 	write_unlock(&chan_list_lock);
475 
476 	kfree(chan);
477 }
478 
479 void l2cap_chan_hold(struct l2cap_chan *c)
480 {
481 	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
482 
483 	kref_get(&c->kref);
484 }
485 
486 void l2cap_chan_put(struct l2cap_chan *c)
487 {
488 	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
489 
490 	kref_put(&c->kref, l2cap_chan_destroy);
491 }
492 EXPORT_SYMBOL_GPL(l2cap_chan_put);
493 
494 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
495 {
496 	chan->fcs  = L2CAP_FCS_CRC16;
497 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
498 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
499 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
500 	chan->remote_max_tx = chan->max_tx;
501 	chan->remote_tx_win = chan->tx_win;
502 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
503 	chan->sec_level = BT_SECURITY_LOW;
504 	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
505 	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
506 	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
507 	chan->conf_state = 0;
508 
509 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
510 }
511 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
512 
513 static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
514 {
515 	chan->sdu = NULL;
516 	chan->sdu_last_frag = NULL;
517 	chan->sdu_len = 0;
518 	chan->tx_credits = tx_credits;
519 	/* Derive MPS from connection MTU to stop HCI fragmentation */
520 	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
521 	/* Give enough credits for a full packet */
522 	chan->rx_credits = (chan->imtu / chan->mps) + 1;
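	/* For instance, an imtu of 512 with an mps of 247 yields
	 * 512 / 247 + 1 = 3 credits, which covers the three PDUs needed
	 * for a maximum-sized SDU (including its 2-byte SDU length field).
	 */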
523 
524 	skb_queue_head_init(&chan->tx_q);
525 }
526 
527 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
528 {
529 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
530 	       __le16_to_cpu(chan->psm), chan->dcid);
531 
532 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
533 
534 	chan->conn = conn;
535 
536 	switch (chan->chan_type) {
537 	case L2CAP_CHAN_CONN_ORIENTED:
538 		/* Alloc CID for connection-oriented socket */
539 		chan->scid = l2cap_alloc_cid(conn);
540 		if (conn->hcon->type == ACL_LINK)
541 			chan->omtu = L2CAP_DEFAULT_MTU;
542 		break;
543 
544 	case L2CAP_CHAN_CONN_LESS:
545 		/* Connectionless socket */
546 		chan->scid = L2CAP_CID_CONN_LESS;
547 		chan->dcid = L2CAP_CID_CONN_LESS;
548 		chan->omtu = L2CAP_DEFAULT_MTU;
549 		break;
550 
551 	case L2CAP_CHAN_FIXED:
552 		/* Caller will set CID and CID specific MTU values */
553 		break;
554 
555 	default:
556 		/* Raw socket can send/recv signalling messages only */
557 		chan->scid = L2CAP_CID_SIGNALING;
558 		chan->dcid = L2CAP_CID_SIGNALING;
559 		chan->omtu = L2CAP_DEFAULT_MTU;
560 	}
561 
562 	chan->local_id		= L2CAP_BESTEFFORT_ID;
563 	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
564 	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
565 	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
566 	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
567 	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;
568 
569 	l2cap_chan_hold(chan);
570 
571 	/* Only keep a reference for fixed channels if they requested it */
572 	if (chan->chan_type != L2CAP_CHAN_FIXED ||
573 	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
574 		hci_conn_hold(conn->hcon);
575 
576 	list_add(&chan->list, &conn->chan_l);
577 }
578 
579 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
580 {
581 	mutex_lock(&conn->chan_lock);
582 	__l2cap_chan_add(conn, chan);
583 	mutex_unlock(&conn->chan_lock);
584 }
585 
586 void l2cap_chan_del(struct l2cap_chan *chan, int err)
587 {
588 	struct l2cap_conn *conn = chan->conn;
589 
590 	__clear_chan_timer(chan);
591 
592 	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
593 	       state_to_string(chan->state));
594 
595 	chan->ops->teardown(chan, err);
596 
597 	if (conn) {
598 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
599 		/* Delete from channel list */
600 		list_del(&chan->list);
601 
602 		l2cap_chan_put(chan);
603 
604 		chan->conn = NULL;
605 
606 		/* Reference was only held for non-fixed channels or
607 		 * fixed channels that explicitly requested it using the
608 		 * FLAG_HOLD_HCI_CONN flag.
609 		 */
610 		if (chan->chan_type != L2CAP_CHAN_FIXED ||
611 		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
612 			hci_conn_drop(conn->hcon);
613 
614 		if (mgr && mgr->bredr_chan == chan)
615 			mgr->bredr_chan = NULL;
616 	}
617 
618 	if (chan->hs_hchan) {
619 		struct hci_chan *hs_hchan = chan->hs_hchan;
620 
621 		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
622 		amp_disconnect_logical_link(hs_hchan);
623 	}
624 
625 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
626 		return;
627 
628 	switch (chan->mode) {
629 	case L2CAP_MODE_BASIC:
630 		break;
631 
632 	case L2CAP_MODE_LE_FLOWCTL:
633 		skb_queue_purge(&chan->tx_q);
634 		break;
635 
636 	case L2CAP_MODE_ERTM:
637 		__clear_retrans_timer(chan);
638 		__clear_monitor_timer(chan);
639 		__clear_ack_timer(chan);
640 
641 		skb_queue_purge(&chan->srej_q);
642 
643 		l2cap_seq_list_free(&chan->srej_list);
644 		l2cap_seq_list_free(&chan->retrans_list);
645 
646 		/* fall through */
647 
648 	case L2CAP_MODE_STREAMING:
649 		skb_queue_purge(&chan->tx_q);
650 		break;
651 	}
652 
653 	return;
654 }
655 EXPORT_SYMBOL_GPL(l2cap_chan_del);
656 
657 static void l2cap_conn_update_id_addr(struct work_struct *work)
658 {
659 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
660 					       id_addr_update_work);
661 	struct hci_conn *hcon = conn->hcon;
662 	struct l2cap_chan *chan;
663 
664 	mutex_lock(&conn->chan_lock);
665 
666 	list_for_each_entry(chan, &conn->chan_l, list) {
667 		l2cap_chan_lock(chan);
668 		bacpy(&chan->dst, &hcon->dst);
669 		chan->dst_type = bdaddr_dst_type(hcon);
670 		l2cap_chan_unlock(chan);
671 	}
672 
673 	mutex_unlock(&conn->chan_lock);
674 }
675 
676 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
677 {
678 	struct l2cap_conn *conn = chan->conn;
679 	struct l2cap_le_conn_rsp rsp;
680 	u16 result;
681 
682 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
683 		result = L2CAP_CR_LE_AUTHORIZATION;
684 	else
685 		result = L2CAP_CR_LE_BAD_PSM;
686 
687 	l2cap_state_change(chan, BT_DISCONN);
688 
689 	rsp.dcid    = cpu_to_le16(chan->scid);
690 	rsp.mtu     = cpu_to_le16(chan->imtu);
691 	rsp.mps     = cpu_to_le16(chan->mps);
692 	rsp.credits = cpu_to_le16(chan->rx_credits);
693 	rsp.result  = cpu_to_le16(result);
694 
695 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
696 		       &rsp);
697 }
698 
699 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
700 {
701 	struct l2cap_conn *conn = chan->conn;
702 	struct l2cap_conn_rsp rsp;
703 	u16 result;
704 
705 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
706 		result = L2CAP_CR_SEC_BLOCK;
707 	else
708 		result = L2CAP_CR_BAD_PSM;
709 
710 	l2cap_state_change(chan, BT_DISCONN);
711 
712 	rsp.scid   = cpu_to_le16(chan->dcid);
713 	rsp.dcid   = cpu_to_le16(chan->scid);
714 	rsp.result = cpu_to_le16(result);
715 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
716 
717 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
718 }
719 
720 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
721 {
722 	struct l2cap_conn *conn = chan->conn;
723 
724 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
725 
726 	switch (chan->state) {
727 	case BT_LISTEN:
728 		chan->ops->teardown(chan, 0);
729 		break;
730 
731 	case BT_CONNECTED:
732 	case BT_CONFIG:
733 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
734 			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
735 			l2cap_send_disconn_req(chan, reason);
736 		} else
737 			l2cap_chan_del(chan, reason);
738 		break;
739 
740 	case BT_CONNECT2:
741 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
742 			if (conn->hcon->type == ACL_LINK)
743 				l2cap_chan_connect_reject(chan);
744 			else if (conn->hcon->type == LE_LINK)
745 				l2cap_chan_le_connect_reject(chan);
746 		}
747 
748 		l2cap_chan_del(chan, reason);
749 		break;
750 
751 	case BT_CONNECT:
752 	case BT_DISCONN:
753 		l2cap_chan_del(chan, reason);
754 		break;
755 
756 	default:
757 		chan->ops->teardown(chan, 0);
758 		break;
759 	}
760 }
761 EXPORT_SYMBOL(l2cap_chan_close);
762 
763 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
764 {
765 	switch (chan->chan_type) {
766 	case L2CAP_CHAN_RAW:
767 		switch (chan->sec_level) {
768 		case BT_SECURITY_HIGH:
769 		case BT_SECURITY_FIPS:
770 			return HCI_AT_DEDICATED_BONDING_MITM;
771 		case BT_SECURITY_MEDIUM:
772 			return HCI_AT_DEDICATED_BONDING;
773 		default:
774 			return HCI_AT_NO_BONDING;
775 		}
776 		break;
777 	case L2CAP_CHAN_CONN_LESS:
778 		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
779 			if (chan->sec_level == BT_SECURITY_LOW)
780 				chan->sec_level = BT_SECURITY_SDP;
781 		}
782 		if (chan->sec_level == BT_SECURITY_HIGH ||
783 		    chan->sec_level == BT_SECURITY_FIPS)
784 			return HCI_AT_NO_BONDING_MITM;
785 		else
786 			return HCI_AT_NO_BONDING;
787 		break;
788 	case L2CAP_CHAN_CONN_ORIENTED:
789 		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
790 			if (chan->sec_level == BT_SECURITY_LOW)
791 				chan->sec_level = BT_SECURITY_SDP;
792 
793 			if (chan->sec_level == BT_SECURITY_HIGH ||
794 			    chan->sec_level == BT_SECURITY_FIPS)
795 				return HCI_AT_NO_BONDING_MITM;
796 			else
797 				return HCI_AT_NO_BONDING;
798 		}
799 		/* fall through */
800 	default:
801 		switch (chan->sec_level) {
802 		case BT_SECURITY_HIGH:
803 		case BT_SECURITY_FIPS:
804 			return HCI_AT_GENERAL_BONDING_MITM;
805 		case BT_SECURITY_MEDIUM:
806 			return HCI_AT_GENERAL_BONDING;
807 		default:
808 			return HCI_AT_NO_BONDING;
809 		}
810 		break;
811 	}
812 }
813 
814 /* Service level security */
815 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
816 {
817 	struct l2cap_conn *conn = chan->conn;
818 	__u8 auth_type;
819 
820 	if (conn->hcon->type == LE_LINK)
821 		return smp_conn_security(conn->hcon, chan->sec_level);
822 
823 	auth_type = l2cap_get_auth_type(chan);
824 
825 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
826 				 initiator);
827 }
828 
829 static u8 l2cap_get_ident(struct l2cap_conn *conn)
830 {
831 	u8 id;
832 
833 	/* Get next available identifier.
834 	 *    1 - 128 are used by kernel.
835 	 *  129 - 199 are reserved.
836 	 *  200 - 254 are used by utilities like l2ping, etc.
837 	 */
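	/* The counter wraps from 128 back to 1, so ident 0x00 (which the
	 * spec reserves) is never handed out.
	 */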
838 
839 	mutex_lock(&conn->ident_lock);
840 
841 	if (++conn->tx_ident > 128)
842 		conn->tx_ident = 1;
843 
844 	id = conn->tx_ident;
845 
846 	mutex_unlock(&conn->ident_lock);
847 
848 	return id;
849 }
850 
851 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
852 			   void *data)
853 {
854 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
855 	u8 flags;
856 
857 	BT_DBG("code 0x%2.2x", code);
858 
859 	if (!skb)
860 		return;
861 
862 	/* Use NO_FLUSH if supported or if this is an LE link (which does
863 	 * not support auto-flushing packets) */
864 	if (lmp_no_flush_capable(conn->hcon->hdev) ||
865 	    conn->hcon->type == LE_LINK)
866 		flags = ACL_START_NO_FLUSH;
867 	else
868 		flags = ACL_START;
869 
870 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
871 	skb->priority = HCI_PRIO_MAX;
872 
873 	hci_send_acl(conn->hchan, skb, flags);
874 }
875 
876 static bool __chan_is_moving(struct l2cap_chan *chan)
877 {
878 	return chan->move_state != L2CAP_MOVE_STABLE &&
879 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
880 }
881 
882 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
883 {
884 	struct hci_conn *hcon = chan->conn->hcon;
885 	u16 flags;
886 
887 	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
888 	       skb->priority);
889 
890 	if (chan->hs_hcon && !__chan_is_moving(chan)) {
891 		if (chan->hs_hchan)
892 			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
893 		else
894 			kfree_skb(skb);
895 
896 		return;
897 	}
898 
899 	/* Use NO_FLUSH for LE links (where this is the only option) or
900 	 * if the BR/EDR link supports it and flushing has not been
901 	 * explicitly requested (through FLAG_FLUSHABLE).
902 	 */
903 	if (hcon->type == LE_LINK ||
904 	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
905 	     lmp_no_flush_capable(hcon->hdev)))
906 		flags = ACL_START_NO_FLUSH;
907 	else
908 		flags = ACL_START;
909 
910 	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
911 	hci_send_acl(chan->conn->hchan, skb, flags);
912 }
913 
914 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
915 {
916 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
917 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
918 
919 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
920 		/* S-Frame */
921 		control->sframe = 1;
922 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
923 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
924 
925 		control->sar = 0;
926 		control->txseq = 0;
927 	} else {
928 		/* I-Frame */
929 		control->sframe = 0;
930 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
931 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
932 
933 		control->poll = 0;
934 		control->super = 0;
935 	}
936 }
937 
938 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
939 {
940 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
941 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
942 
943 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
944 		/* S-Frame */
945 		control->sframe = 1;
946 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
947 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
948 
949 		control->sar = 0;
950 		control->txseq = 0;
951 	} else {
952 		/* I-Frame */
953 		control->sframe = 0;
954 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
955 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
956 
957 		control->poll = 0;
958 		control->super = 0;
959 	}
960 }
961 
962 static inline void __unpack_control(struct l2cap_chan *chan,
963 				    struct sk_buff *skb)
964 {
965 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
966 		__unpack_extended_control(get_unaligned_le32(skb->data),
967 					  &bt_cb(skb)->l2cap);
968 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
969 	} else {
970 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
971 					  &bt_cb(skb)->l2cap);
972 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
973 	}
974 }
975 
976 static u32 __pack_extended_control(struct l2cap_ctrl *control)
977 {
978 	u32 packed;
979 
980 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
981 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
982 
983 	if (control->sframe) {
984 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
985 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
986 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
987 	} else {
988 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
989 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
990 	}
991 
992 	return packed;
993 }
994 
995 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
996 {
997 	u16 packed;
998 
999 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1000 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1001 
1002 	if (control->sframe) {
1003 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1004 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1005 		packed |= L2CAP_CTRL_FRAME_TYPE;
1006 	} else {
1007 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1008 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1009 	}
1010 
1011 	return packed;
1012 }
1013 
1014 static inline void __pack_control(struct l2cap_chan *chan,
1015 				  struct l2cap_ctrl *control,
1016 				  struct sk_buff *skb)
1017 {
1018 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1019 		put_unaligned_le32(__pack_extended_control(control),
1020 				   skb->data + L2CAP_HDR_SIZE);
1021 	} else {
1022 		put_unaligned_le16(__pack_enhanced_control(control),
1023 				   skb->data + L2CAP_HDR_SIZE);
1024 	}
1025 }
1026 
1027 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1028 {
1029 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1030 		return L2CAP_EXT_HDR_SIZE;
1031 	else
1032 		return L2CAP_ENH_HDR_SIZE;
1033 }
1034 
1035 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1036 					       u32 control)
1037 {
1038 	struct sk_buff *skb;
1039 	struct l2cap_hdr *lh;
1040 	int hlen = __ertm_hdr_size(chan);
1041 
1042 	if (chan->fcs == L2CAP_FCS_CRC16)
1043 		hlen += L2CAP_FCS_SIZE;
1044 
1045 	skb = bt_skb_alloc(hlen, GFP_KERNEL);
1046 
1047 	if (!skb)
1048 		return ERR_PTR(-ENOMEM);
1049 
1050 	lh = skb_put(skb, L2CAP_HDR_SIZE);
1051 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1052 	lh->cid = cpu_to_le16(chan->dcid);
1053 
1054 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1055 		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1056 	else
1057 		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1058 
1059 	if (chan->fcs == L2CAP_FCS_CRC16) {
1060 		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1061 		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1062 	}
1063 
1064 	skb->priority = HCI_PRIO_MAX;
1065 	return skb;
1066 }
1067 
1068 static void l2cap_send_sframe(struct l2cap_chan *chan,
1069 			      struct l2cap_ctrl *control)
1070 {
1071 	struct sk_buff *skb;
1072 	u32 control_field;
1073 
1074 	BT_DBG("chan %p, control %p", chan, control);
1075 
1076 	if (!control->sframe)
1077 		return;
1078 
1079 	if (__chan_is_moving(chan))
1080 		return;
1081 
1082 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1083 	    !control->poll)
1084 		control->final = 1;
1085 
1086 	if (control->super == L2CAP_SUPER_RR)
1087 		clear_bit(CONN_RNR_SENT, &chan->conn_state);
1088 	else if (control->super == L2CAP_SUPER_RNR)
1089 		set_bit(CONN_RNR_SENT, &chan->conn_state);
1090 
1091 	if (control->super != L2CAP_SUPER_SREJ) {
1092 		chan->last_acked_seq = control->reqseq;
1093 		__clear_ack_timer(chan);
1094 	}
1095 
1096 	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1097 	       control->final, control->poll, control->super);
1098 
1099 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1100 		control_field = __pack_extended_control(control);
1101 	else
1102 		control_field = __pack_enhanced_control(control);
1103 
1104 	skb = l2cap_create_sframe_pdu(chan, control_field);
1105 	if (!IS_ERR(skb))
1106 		l2cap_do_send(chan, skb);
1107 }
1108 
1109 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1110 {
1111 	struct l2cap_ctrl control;
1112 
1113 	BT_DBG("chan %p, poll %d", chan, poll);
1114 
1115 	memset(&control, 0, sizeof(control));
1116 	control.sframe = 1;
1117 	control.poll = poll;
1118 
1119 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1120 		control.super = L2CAP_SUPER_RNR;
1121 	else
1122 		control.super = L2CAP_SUPER_RR;
1123 
1124 	control.reqseq = chan->buffer_seq;
1125 	l2cap_send_sframe(chan, &control);
1126 }
1127 
1128 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1129 {
1130 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1131 		return true;
1132 
1133 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1134 }
1135 
1136 static bool __amp_capable(struct l2cap_chan *chan)
1137 {
1138 	struct l2cap_conn *conn = chan->conn;
1139 	struct hci_dev *hdev;
1140 	bool amp_available = false;
1141 
1142 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1143 		return false;
1144 
1145 	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1146 		return false;
1147 
1148 	read_lock(&hci_dev_list_lock);
1149 	list_for_each_entry(hdev, &hci_dev_list, list) {
1150 		if (hdev->amp_type != AMP_TYPE_BREDR &&
1151 		    test_bit(HCI_UP, &hdev->flags)) {
1152 			amp_available = true;
1153 			break;
1154 		}
1155 	}
1156 	read_unlock(&hci_dev_list_lock);
1157 
1158 	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1159 		return amp_available;
1160 
1161 	return false;
1162 }
1163 
1164 static bool l2cap_check_efs(struct l2cap_chan *chan)
1165 {
1166 	/* Check EFS parameters */
1167 	return true;
1168 }
1169 
1170 void l2cap_send_conn_req(struct l2cap_chan *chan)
1171 {
1172 	struct l2cap_conn *conn = chan->conn;
1173 	struct l2cap_conn_req req;
1174 
1175 	req.scid = cpu_to_le16(chan->scid);
1176 	req.psm  = chan->psm;
1177 
1178 	chan->ident = l2cap_get_ident(conn);
1179 
1180 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1181 
1182 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1183 }
1184 
1185 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1186 {
1187 	struct l2cap_create_chan_req req;
1188 	req.scid = cpu_to_le16(chan->scid);
1189 	req.psm  = chan->psm;
1190 	req.amp_id = amp_id;
1191 
1192 	chan->ident = l2cap_get_ident(chan->conn);
1193 
1194 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1195 		       sizeof(req), &req);
1196 }
1197 
1198 static void l2cap_move_setup(struct l2cap_chan *chan)
1199 {
1200 	struct sk_buff *skb;
1201 
1202 	BT_DBG("chan %p", chan);
1203 
1204 	if (chan->mode != L2CAP_MODE_ERTM)
1205 		return;
1206 
1207 	__clear_retrans_timer(chan);
1208 	__clear_monitor_timer(chan);
1209 	__clear_ack_timer(chan);
1210 
1211 	chan->retry_count = 0;
1212 	skb_queue_walk(&chan->tx_q, skb) {
1213 		if (bt_cb(skb)->l2cap.retries)
1214 			bt_cb(skb)->l2cap.retries = 1;
1215 		else
1216 			break;
1217 	}
1218 
1219 	chan->expected_tx_seq = chan->buffer_seq;
1220 
1221 	clear_bit(CONN_REJ_ACT, &chan->conn_state);
1222 	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1223 	l2cap_seq_list_clear(&chan->retrans_list);
1224 	l2cap_seq_list_clear(&chan->srej_list);
1225 	skb_queue_purge(&chan->srej_q);
1226 
1227 	chan->tx_state = L2CAP_TX_STATE_XMIT;
1228 	chan->rx_state = L2CAP_RX_STATE_MOVE;
1229 
1230 	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1231 }
1232 
1233 static void l2cap_move_done(struct l2cap_chan *chan)
1234 {
1235 	u8 move_role = chan->move_role;
1236 	BT_DBG("chan %p", chan);
1237 
1238 	chan->move_state = L2CAP_MOVE_STABLE;
1239 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1240 
1241 	if (chan->mode != L2CAP_MODE_ERTM)
1242 		return;
1243 
1244 	switch (move_role) {
1245 	case L2CAP_MOVE_ROLE_INITIATOR:
1246 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1247 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1248 		break;
1249 	case L2CAP_MOVE_ROLE_RESPONDER:
1250 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1251 		break;
1252 	}
1253 }
1254 
1255 static void l2cap_chan_ready(struct l2cap_chan *chan)
1256 {
1257 	/* The channel may have already been flagged as connected in
1258 	 * case of receiving data before the L2CAP info req/rsp
1259 	 * procedure is complete.
1260 	 */
1261 	if (chan->state == BT_CONNECTED)
1262 		return;
1263 
1264 	/* This clears all conf flags, including CONF_NOT_COMPLETE */
1265 	chan->conf_state = 0;
1266 	__clear_chan_timer(chan);
1267 
1268 	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1269 		chan->ops->suspend(chan);
1270 
1271 	chan->state = BT_CONNECTED;
1272 
1273 	chan->ops->ready(chan);
1274 }
1275 
1276 static void l2cap_le_connect(struct l2cap_chan *chan)
1277 {
1278 	struct l2cap_conn *conn = chan->conn;
1279 	struct l2cap_le_conn_req req;
1280 
1281 	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1282 		return;
1283 
1284 	l2cap_le_flowctl_init(chan, 0);
1285 
1286 	req.psm     = chan->psm;
1287 	req.scid    = cpu_to_le16(chan->scid);
1288 	req.mtu     = cpu_to_le16(chan->imtu);
1289 	req.mps     = cpu_to_le16(chan->mps);
1290 	req.credits = cpu_to_le16(chan->rx_credits);
1291 
1292 	chan->ident = l2cap_get_ident(conn);
1293 
1294 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1295 		       sizeof(req), &req);
1296 }
1297 
1298 static void l2cap_le_start(struct l2cap_chan *chan)
1299 {
1300 	struct l2cap_conn *conn = chan->conn;
1301 
1302 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1303 		return;
1304 
1305 	if (!chan->psm) {
1306 		l2cap_chan_ready(chan);
1307 		return;
1308 	}
1309 
1310 	if (chan->state == BT_CONNECT)
1311 		l2cap_le_connect(chan);
1312 }
1313 
1314 static void l2cap_start_connection(struct l2cap_chan *chan)
1315 {
1316 	if (__amp_capable(chan)) {
1317 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1318 		a2mp_discover_amp(chan);
1319 	} else if (chan->conn->hcon->type == LE_LINK) {
1320 		l2cap_le_start(chan);
1321 	} else {
1322 		l2cap_send_conn_req(chan);
1323 	}
1324 }
1325 
1326 static void l2cap_request_info(struct l2cap_conn *conn)
1327 {
1328 	struct l2cap_info_req req;
1329 
1330 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1331 		return;
1332 
1333 	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1334 
1335 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1336 	conn->info_ident = l2cap_get_ident(conn);
1337 
1338 	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1339 
1340 	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1341 		       sizeof(req), &req);
1342 }
1343 
1344 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1345 {
1346 	/* The minimum encryption key size needs to be enforced by the
1347 	 * host stack before establishing any L2CAP connections. The
1348 	 * specification in theory allows a minimum of 1, but to align
1349 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1350 	 *
1351 	 * This check might also be called for unencrypted connections
1352 	 * that have no key size requirements. Ensure that the link is
1353 	 * actually encrypted before enforcing a key size.
1354 	 */
1355 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1356 		hcon->enc_key_size > HCI_MIN_ENC_KEY_SIZE);
1357 }
1358 
1359 static void l2cap_do_start(struct l2cap_chan *chan)
1360 {
1361 	struct l2cap_conn *conn = chan->conn;
1362 
1363 	if (conn->hcon->type == LE_LINK) {
1364 		l2cap_le_start(chan);
1365 		return;
1366 	}
1367 
1368 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1369 		l2cap_request_info(conn);
1370 		return;
1371 	}
1372 
1373 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1374 		return;
1375 
1376 	if (!l2cap_chan_check_security(chan, true) ||
1377 	    !__l2cap_no_conn_pending(chan))
1378 		return;
1379 
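	/* Only start the connection when the link's encryption key is long
	 * enough; otherwise arm the disconnect timer so the channel gets
	 * torn down instead.
	 */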
1380 	if (l2cap_check_enc_key_size(conn->hcon))
1381 		l2cap_start_connection(chan);
1382 	else
1383 		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
1384 }
1385 
1386 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1387 {
1388 	u32 local_feat_mask = l2cap_feat_mask;
1389 	if (!disable_ertm)
1390 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1391 
1392 	switch (mode) {
1393 	case L2CAP_MODE_ERTM:
1394 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1395 	case L2CAP_MODE_STREAMING:
1396 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1397 	default:
1398 		return 0x00;
1399 	}
1400 }
1401 
1402 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1403 {
1404 	struct l2cap_conn *conn = chan->conn;
1405 	struct l2cap_disconn_req req;
1406 
1407 	if (!conn)
1408 		return;
1409 
1410 	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1411 		__clear_retrans_timer(chan);
1412 		__clear_monitor_timer(chan);
1413 		__clear_ack_timer(chan);
1414 	}
1415 
1416 	if (chan->scid == L2CAP_CID_A2MP) {
1417 		l2cap_state_change(chan, BT_DISCONN);
1418 		return;
1419 	}
1420 
1421 	req.dcid = cpu_to_le16(chan->dcid);
1422 	req.scid = cpu_to_le16(chan->scid);
1423 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1424 		       sizeof(req), &req);
1425 
1426 	l2cap_state_change_and_error(chan, BT_DISCONN, err);
1427 }
1428 
1429 /* ---- L2CAP connections ---- */
1430 static void l2cap_conn_start(struct l2cap_conn *conn)
1431 {
1432 	struct l2cap_chan *chan, *tmp;
1433 
1434 	BT_DBG("conn %p", conn);
1435 
1436 	mutex_lock(&conn->chan_lock);
1437 
1438 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1439 		l2cap_chan_lock(chan);
1440 
1441 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1442 			l2cap_chan_ready(chan);
1443 			l2cap_chan_unlock(chan);
1444 			continue;
1445 		}
1446 
1447 		if (chan->state == BT_CONNECT) {
1448 			if (!l2cap_chan_check_security(chan, true) ||
1449 			    !__l2cap_no_conn_pending(chan)) {
1450 				l2cap_chan_unlock(chan);
1451 				continue;
1452 			}
1453 
1454 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1455 			    && test_bit(CONF_STATE2_DEVICE,
1456 					&chan->conf_state)) {
1457 				l2cap_chan_close(chan, ECONNRESET);
1458 				l2cap_chan_unlock(chan);
1459 				continue;
1460 			}
1461 
1462 			if (l2cap_check_enc_key_size(conn->hcon))
1463 				l2cap_start_connection(chan);
1464 			else
1465 				l2cap_chan_close(chan, ECONNREFUSED);
1466 
1467 		} else if (chan->state == BT_CONNECT2) {
1468 			struct l2cap_conn_rsp rsp;
1469 			char buf[128];
1470 			rsp.scid = cpu_to_le16(chan->dcid);
1471 			rsp.dcid = cpu_to_le16(chan->scid);
1472 
1473 			if (l2cap_chan_check_security(chan, false)) {
1474 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1475 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1476 					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1477 					chan->ops->defer(chan);
1478 
1479 				} else {
1480 					l2cap_state_change(chan, BT_CONFIG);
1481 					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1482 					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1483 				}
1484 			} else {
1485 				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1486 				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1487 			}
1488 
1489 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1490 				       sizeof(rsp), &rsp);
1491 
1492 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1493 			    rsp.result != L2CAP_CR_SUCCESS) {
1494 				l2cap_chan_unlock(chan);
1495 				continue;
1496 			}
1497 
1498 			set_bit(CONF_REQ_SENT, &chan->conf_state);
1499 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1500 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1501 			chan->num_conf_req++;
1502 		}
1503 
1504 		l2cap_chan_unlock(chan);
1505 	}
1506 
1507 	mutex_unlock(&conn->chan_lock);
1508 }
1509 
1510 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1511 {
1512 	struct hci_conn *hcon = conn->hcon;
1513 	struct hci_dev *hdev = hcon->hdev;
1514 
1515 	BT_DBG("%s conn %p", hdev->name, conn);
1516 
1517 	/* For outgoing pairing which doesn't necessarily have an
1518 	 * associated socket (e.g. mgmt_pair_device).
1519 	 */
1520 	if (hcon->out)
1521 		smp_conn_security(hcon, hcon->pending_sec_level);
1522 
1523 	/* For LE slave connections, make sure the connection interval
1524 	 * is in the range of the minimum and maximum interval that has
1525 	 * been configured for this connection. If not, then trigger
1526 	 * the connection update procedure.
1527 	 */
1528 	if (hcon->role == HCI_ROLE_SLAVE &&
1529 	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1530 	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1531 		struct l2cap_conn_param_update_req req;
1532 
1533 		req.min = cpu_to_le16(hcon->le_conn_min_interval);
1534 		req.max = cpu_to_le16(hcon->le_conn_max_interval);
1535 		req.latency = cpu_to_le16(hcon->le_conn_latency);
1536 		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1537 
1538 		l2cap_send_cmd(conn, l2cap_get_ident(conn),
1539 			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1540 	}
1541 }
1542 
1543 static void l2cap_conn_ready(struct l2cap_conn *conn)
1544 {
1545 	struct l2cap_chan *chan;
1546 	struct hci_conn *hcon = conn->hcon;
1547 
1548 	BT_DBG("conn %p", conn);
1549 
1550 	if (hcon->type == ACL_LINK)
1551 		l2cap_request_info(conn);
1552 
1553 	mutex_lock(&conn->chan_lock);
1554 
1555 	list_for_each_entry(chan, &conn->chan_l, list) {
1556 
1557 		l2cap_chan_lock(chan);
1558 
1559 		if (chan->scid == L2CAP_CID_A2MP) {
1560 			l2cap_chan_unlock(chan);
1561 			continue;
1562 		}
1563 
1564 		if (hcon->type == LE_LINK) {
1565 			l2cap_le_start(chan);
1566 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1567 			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1568 				l2cap_chan_ready(chan);
1569 		} else if (chan->state == BT_CONNECT) {
1570 			l2cap_do_start(chan);
1571 		}
1572 
1573 		l2cap_chan_unlock(chan);
1574 	}
1575 
1576 	mutex_unlock(&conn->chan_lock);
1577 
1578 	if (hcon->type == LE_LINK)
1579 		l2cap_le_conn_ready(conn);
1580 
1581 	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1582 }
1583 
1584 /* Notify sockets that we cannot guarantee reliability anymore */
1585 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1586 {
1587 	struct l2cap_chan *chan;
1588 
1589 	BT_DBG("conn %p", conn);
1590 
1591 	mutex_lock(&conn->chan_lock);
1592 
1593 	list_for_each_entry(chan, &conn->chan_l, list) {
1594 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1595 			l2cap_chan_set_err(chan, err);
1596 	}
1597 
1598 	mutex_unlock(&conn->chan_lock);
1599 }
1600 
1601 static void l2cap_info_timeout(struct work_struct *work)
1602 {
1603 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1604 					       info_timer.work);
1605 
1606 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1607 	conn->info_ident = 0;
1608 
1609 	l2cap_conn_start(conn);
1610 }
1611 
1612 /*
1613  * l2cap_user
1614  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1615  * callback is called during registration. The ->remove callback is called
1616  * during unregistration.
1617  * An l2cap_user object is unregistered either explicitly or when the
1618  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1619  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1620  * External modules must own a reference to the l2cap_conn object if they intend
1621  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1622  * any time if they don't.
1623  */
1624 
1625 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1626 {
1627 	struct hci_dev *hdev = conn->hcon->hdev;
1628 	int ret;
1629 
1630 	/* We need to check whether l2cap_conn is registered. If it is not, we
1631  * must not register the l2cap_user. l2cap_conn_del() unregisters
1632 	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1633 	 * relies on the parent hci_conn object to be locked. This itself relies
1634 	 * on the hci_dev object to be locked. So we must lock the hci device
1635 	 * here, too. */
1636 
1637 	hci_dev_lock(hdev);
1638 
1639 	if (!list_empty(&user->list)) {
1640 		ret = -EINVAL;
1641 		goto out_unlock;
1642 	}
1643 
1644 	/* conn->hchan is NULL after l2cap_conn_del() was called */
1645 	if (!conn->hchan) {
1646 		ret = -ENODEV;
1647 		goto out_unlock;
1648 	}
1649 
1650 	ret = user->probe(conn, user);
1651 	if (ret)
1652 		goto out_unlock;
1653 
1654 	list_add(&user->list, &conn->users);
1655 	ret = 0;
1656 
1657 out_unlock:
1658 	hci_dev_unlock(hdev);
1659 	return ret;
1660 }
1661 EXPORT_SYMBOL(l2cap_register_user);
1662 
1663 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1664 {
1665 	struct hci_dev *hdev = conn->hcon->hdev;
1666 
1667 	hci_dev_lock(hdev);
1668 
1669 	if (list_empty(&user->list))
1670 		goto out_unlock;
1671 
1672 	list_del_init(&user->list);
1673 	user->remove(conn, user);
1674 
1675 out_unlock:
1676 	hci_dev_unlock(hdev);
1677 }
1678 EXPORT_SYMBOL(l2cap_unregister_user);
1679 
1680 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1681 {
1682 	struct l2cap_user *user;
1683 
1684 	while (!list_empty(&conn->users)) {
1685 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1686 		list_del_init(&user->list);
1687 		user->remove(conn, user);
1688 	}
1689 }
1690 
1691 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1692 {
1693 	struct l2cap_conn *conn = hcon->l2cap_data;
1694 	struct l2cap_chan *chan, *l;
1695 
1696 	if (!conn)
1697 		return;
1698 
1699 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1700 
1701 	kfree_skb(conn->rx_skb);
1702 
1703 	skb_queue_purge(&conn->pending_rx);
1704 
1705 	/* We cannot call flush_work(&conn->pending_rx_work) here since we
1706 	 * might block if we are running on a worker from the same workqueue
1707 	 * pending_rx_work is waiting on.
1708 	 */
1709 	if (work_pending(&conn->pending_rx_work))
1710 		cancel_work_sync(&conn->pending_rx_work);
1711 
1712 	if (work_pending(&conn->id_addr_update_work))
1713 		cancel_work_sync(&conn->id_addr_update_work);
1714 
1715 	l2cap_unregister_all_users(conn);
1716 
1717 	/* Force the connection to be immediately dropped */
1718 	hcon->disc_timeout = 0;
1719 
1720 	mutex_lock(&conn->chan_lock);
1721 
1722 	/* Kill channels */
1723 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1724 		l2cap_chan_hold(chan);
1725 		l2cap_chan_lock(chan);
1726 
1727 		l2cap_chan_del(chan, err);
1728 
1729 		l2cap_chan_unlock(chan);
1730 
1731 		chan->ops->close(chan);
1732 		l2cap_chan_put(chan);
1733 	}
1734 
1735 	mutex_unlock(&conn->chan_lock);
1736 
1737 	hci_chan_del(conn->hchan);
1738 
1739 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1740 		cancel_delayed_work_sync(&conn->info_timer);
1741 
1742 	hcon->l2cap_data = NULL;
1743 	conn->hchan = NULL;
1744 	l2cap_conn_put(conn);
1745 }
1746 
1747 static void l2cap_conn_free(struct kref *ref)
1748 {
1749 	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1750 
1751 	hci_conn_put(conn->hcon);
1752 	kfree(conn);
1753 }
1754 
1755 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1756 {
1757 	kref_get(&conn->ref);
1758 	return conn;
1759 }
1760 EXPORT_SYMBOL(l2cap_conn_get);
1761 
1762 void l2cap_conn_put(struct l2cap_conn *conn)
1763 {
1764 	kref_put(&conn->ref, l2cap_conn_free);
1765 }
1766 EXPORT_SYMBOL(l2cap_conn_put);
1767 
1768 /* ---- Socket interface ---- */
1769 
1770 /* Find socket with psm and source / destination bdaddr.
1771  * Returns closest match.
1772  */
1773 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1774 						   bdaddr_t *src,
1775 						   bdaddr_t *dst,
1776 						   u8 link_type)
1777 {
1778 	struct l2cap_chan *c, *c1 = NULL;
1779 
1780 	read_lock(&chan_list_lock);
1781 
1782 	list_for_each_entry(c, &chan_list, global_l) {
1783 		if (state && c->state != state)
1784 			continue;
1785 
1786 		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1787 			continue;
1788 
1789 		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1790 			continue;
1791 
1792 		if (c->psm == psm) {
1793 			int src_match, dst_match;
1794 			int src_any, dst_any;
1795 
1796 			/* Exact match. */
1797 			src_match = !bacmp(&c->src, src);
1798 			dst_match = !bacmp(&c->dst, dst);
1799 			if (src_match && dst_match) {
1800 				l2cap_chan_hold(c);
1801 				read_unlock(&chan_list_lock);
1802 				return c;
1803 			}
1804 
1805 			/* Closest match */
1806 			src_any = !bacmp(&c->src, BDADDR_ANY);
1807 			dst_any = !bacmp(&c->dst, BDADDR_ANY);
1808 			if ((src_match && dst_any) || (src_any && dst_match) ||
1809 			    (src_any && dst_any))
1810 				c1 = c;
1811 		}
1812 	}
1813 
1814 	if (c1)
1815 		l2cap_chan_hold(c1);
1816 
1817 	read_unlock(&chan_list_lock);
1818 
1819 	return c1;
1820 }
1821 
1822 static void l2cap_monitor_timeout(struct work_struct *work)
1823 {
1824 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1825 					       monitor_timer.work);
1826 
1827 	BT_DBG("chan %p", chan);
1828 
1829 	l2cap_chan_lock(chan);
1830 
1831 	if (!chan->conn) {
1832 		l2cap_chan_unlock(chan);
1833 		l2cap_chan_put(chan);
1834 		return;
1835 	}
1836 
1837 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1838 
1839 	l2cap_chan_unlock(chan);
1840 	l2cap_chan_put(chan);
1841 }
1842 
1843 static void l2cap_retrans_timeout(struct work_struct *work)
1844 {
1845 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1846 					       retrans_timer.work);
1847 
1848 	BT_DBG("chan %p", chan);
1849 
1850 	l2cap_chan_lock(chan);
1851 
1852 	if (!chan->conn) {
1853 		l2cap_chan_unlock(chan);
1854 		l2cap_chan_put(chan);
1855 		return;
1856 	}
1857 
1858 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1859 	l2cap_chan_unlock(chan);
1860 	l2cap_chan_put(chan);
1861 }
1862 
1863 static void l2cap_streaming_send(struct l2cap_chan *chan,
1864 				 struct sk_buff_head *skbs)
1865 {
1866 	struct sk_buff *skb;
1867 	struct l2cap_ctrl *control;
1868 
1869 	BT_DBG("chan %p, skbs %p", chan, skbs);
1870 
1871 	if (__chan_is_moving(chan))
1872 		return;
1873 
1874 	skb_queue_splice_tail_init(skbs, &chan->tx_q);
1875 
1876 	while (!skb_queue_empty(&chan->tx_q)) {
1877 
1878 		skb = skb_dequeue(&chan->tx_q);
1879 
1880 		bt_cb(skb)->l2cap.retries = 1;
1881 		control = &bt_cb(skb)->l2cap;
1882 
1883 		control->reqseq = 0;
1884 		control->txseq = chan->next_tx_seq;
1885 
1886 		__pack_control(chan, control, skb);
1887 
1888 		if (chan->fcs == L2CAP_FCS_CRC16) {
1889 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1890 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1891 		}
1892 
1893 		l2cap_do_send(chan, skb);
1894 
1895 		BT_DBG("Sent txseq %u", control->txseq);
1896 
1897 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1898 		chan->frames_sent++;
1899 	}
1900 }
1901 
1902 static int l2cap_ertm_send(struct l2cap_chan *chan)
1903 {
1904 	struct sk_buff *skb, *tx_skb;
1905 	struct l2cap_ctrl *control;
1906 	int sent = 0;
1907 
1908 	BT_DBG("chan %p", chan);
1909 
1910 	if (chan->state != BT_CONNECTED)
1911 		return -ENOTCONN;
1912 
1913 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1914 		return 0;
1915 
1916 	if (__chan_is_moving(chan))
1917 		return 0;
1918 
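	/* Keep transmitting new I-frames while there is queued data, the
	 * remote's transmit window still has room and the TX state machine
	 * is in the XMIT state.
	 */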
1919 	while (chan->tx_send_head &&
1920 	       chan->unacked_frames < chan->remote_tx_win &&
1921 	       chan->tx_state == L2CAP_TX_STATE_XMIT) {
1922 
1923 		skb = chan->tx_send_head;
1924 
1925 		bt_cb(skb)->l2cap.retries = 1;
1926 		control = &bt_cb(skb)->l2cap;
1927 
1928 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1929 			control->final = 1;
1930 
1931 		control->reqseq = chan->buffer_seq;
1932 		chan->last_acked_seq = chan->buffer_seq;
1933 		control->txseq = chan->next_tx_seq;
1934 
1935 		__pack_control(chan, control, skb);
1936 
1937 		if (chan->fcs == L2CAP_FCS_CRC16) {
1938 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1939 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1940 		}
1941 
1942 		/* Clone after data has been modified. Data is assumed to be
1943 		   read-only (for locking purposes) on cloned sk_buffs.
1944 		 */
1945 		tx_skb = skb_clone(skb, GFP_KERNEL);
1946 
1947 		if (!tx_skb)
1948 			break;
1949 
1950 		__set_retrans_timer(chan);
1951 
1952 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1953 		chan->unacked_frames++;
1954 		chan->frames_sent++;
1955 		sent++;
1956 
1957 		if (skb_queue_is_last(&chan->tx_q, skb))
1958 			chan->tx_send_head = NULL;
1959 		else
1960 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1961 
1962 		l2cap_do_send(chan, tx_skb);
1963 		BT_DBG("Sent txseq %u", control->txseq);
1964 	}
1965 
1966 	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1967 	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
1968 
1969 	return sent;
1970 }
1971 
1972 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1973 {
1974 	struct l2cap_ctrl control;
1975 	struct sk_buff *skb;
1976 	struct sk_buff *tx_skb;
1977 	u16 seq;
1978 
1979 	BT_DBG("chan %p", chan);
1980 
1981 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1982 		return;
1983 
1984 	if (__chan_is_moving(chan))
1985 		return;
1986 
1987 	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1988 		seq = l2cap_seq_list_pop(&chan->retrans_list);
1989 
1990 		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1991 		if (!skb) {
1992 			BT_DBG("Error: Can't retransmit seq %d, frame missing",
1993 			       seq);
1994 			continue;
1995 		}
1996 
1997 		bt_cb(skb)->l2cap.retries++;
1998 		control = bt_cb(skb)->l2cap;
1999 
2000 		if (chan->max_tx != 0 &&
2001 		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
2002 			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2003 			l2cap_send_disconn_req(chan, ECONNRESET);
2004 			l2cap_seq_list_clear(&chan->retrans_list);
2005 			break;
2006 		}
2007 
2008 		control.reqseq = chan->buffer_seq;
2009 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2010 			control.final = 1;
2011 		else
2012 			control.final = 0;
2013 
2014 		if (skb_cloned(skb)) {
2015 			/* Cloned sk_buffs are read-only, so we need a
2016 			 * writeable copy
2017 			 */
2018 			tx_skb = skb_copy(skb, GFP_KERNEL);
2019 		} else {
2020 			tx_skb = skb_clone(skb, GFP_KERNEL);
2021 		}
2022 
2023 		if (!tx_skb) {
2024 			l2cap_seq_list_clear(&chan->retrans_list);
2025 			break;
2026 		}
2027 
2028 		/* Update skb contents */
2029 		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2030 			put_unaligned_le32(__pack_extended_control(&control),
2031 					   tx_skb->data + L2CAP_HDR_SIZE);
2032 		} else {
2033 			put_unaligned_le16(__pack_enhanced_control(&control),
2034 					   tx_skb->data + L2CAP_HDR_SIZE);
2035 		}
2036 
2037 		/* Update FCS */
2038 		if (chan->fcs == L2CAP_FCS_CRC16) {
2039 			u16 fcs = crc16(0, (u8 *) tx_skb->data,
2040 					tx_skb->len - L2CAP_FCS_SIZE);
2041 			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2042 						L2CAP_FCS_SIZE);
2043 		}
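		/* Illustrative note: the CRC-16 is recomputed over everything
		 * except the existing 2-byte FCS trailer and written back over
		 * that trailer, since the control field above was just
		 * rewritten.
		 */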
2044 
2045 		l2cap_do_send(chan, tx_skb);
2046 
2047 		BT_DBG("Resent txseq %d", control.txseq);
2048 
2049 		chan->last_acked_seq = chan->buffer_seq;
2050 	}
2051 }
2052 
2053 static void l2cap_retransmit(struct l2cap_chan *chan,
2054 			     struct l2cap_ctrl *control)
2055 {
2056 	BT_DBG("chan %p, control %p", chan, control);
2057 
2058 	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2059 	l2cap_ertm_resend(chan);
2060 }
2061 
2062 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2063 				 struct l2cap_ctrl *control)
2064 {
2065 	struct sk_buff *skb;
2066 
2067 	BT_DBG("chan %p, control %p", chan, control);
2068 
2069 	if (control->poll)
2070 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
2071 
2072 	l2cap_seq_list_clear(&chan->retrans_list);
2073 
2074 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2075 		return;
2076 
2077 	if (chan->unacked_frames) {
2078 		skb_queue_walk(&chan->tx_q, skb) {
2079 			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2080 			    skb == chan->tx_send_head)
2081 				break;
2082 		}
2083 
2084 		skb_queue_walk_from(&chan->tx_q, skb) {
2085 			if (skb == chan->tx_send_head)
2086 				break;
2087 
2088 			l2cap_seq_list_append(&chan->retrans_list,
2089 					      bt_cb(skb)->l2cap.txseq);
2090 		}
2091 
2092 		l2cap_ertm_resend(chan);
2093 	}
2094 }
2095 
2096 static void l2cap_send_ack(struct l2cap_chan *chan)
2097 {
2098 	struct l2cap_ctrl control;
2099 	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2100 					 chan->last_acked_seq);
2101 	int threshold;
2102 
2103 	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2104 	       chan, chan->last_acked_seq, chan->buffer_seq);
2105 
2106 	memset(&control, 0, sizeof(control));
2107 	control.sframe = 1;
2108 
2109 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2110 	    chan->rx_state == L2CAP_RX_STATE_RECV) {
2111 		__clear_ack_timer(chan);
2112 		control.super = L2CAP_SUPER_RNR;
2113 		control.reqseq = chan->buffer_seq;
2114 		l2cap_send_sframe(chan, &control);
2115 	} else {
2116 		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2117 			l2cap_ertm_send(chan);
2118 			/* If any i-frames were sent, they included an ack */
2119 			if (chan->buffer_seq == chan->last_acked_seq)
2120 				frames_to_ack = 0;
2121 		}
2122 
2123 		/* Ack now if the window is 3/4ths full.
2124 		 * Calculate without mul or div
2125 		 */
2126 		threshold = chan->ack_win;
2127 		threshold += threshold << 1;
2128 		threshold >>= 2;
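		/* Worked example (illustrative; values assumed): with
		 * ack_win = 63, the shifts give 63 + 126 = 189 and
		 * 189 >> 2 = 47, i.e. floor(3 * 63 / 4), so an ack is forced
		 * once 47 frames are awaiting acknowledgement.
		 */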
2129 
2130 		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2131 		       threshold);
2132 
2133 		if (frames_to_ack >= threshold) {
2134 			__clear_ack_timer(chan);
2135 			control.super = L2CAP_SUPER_RR;
2136 			control.reqseq = chan->buffer_seq;
2137 			l2cap_send_sframe(chan, &control);
2138 			frames_to_ack = 0;
2139 		}
2140 
2141 		if (frames_to_ack)
2142 			__set_ack_timer(chan);
2143 	}
2144 }
2145 
2146 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2147 					 struct msghdr *msg, int len,
2148 					 int count, struct sk_buff *skb)
2149 {
2150 	struct l2cap_conn *conn = chan->conn;
2151 	struct sk_buff **frag;
2152 	int sent = 0;
2153 
2154 	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
2155 		return -EFAULT;
2156 
2157 	sent += count;
2158 	len  -= count;
2159 
2160 	/* Continuation fragments (no L2CAP header) */
2161 	frag = &skb_shinfo(skb)->frag_list;
2162 	while (len) {
2163 		struct sk_buff *tmp;
2164 
2165 		count = min_t(unsigned int, conn->mtu, len);
2166 
2167 		tmp = chan->ops->alloc_skb(chan, 0, count,
2168 					   msg->msg_flags & MSG_DONTWAIT);
2169 		if (IS_ERR(tmp))
2170 			return PTR_ERR(tmp);
2171 
2172 		*frag = tmp;
2173 
2174 		if (!copy_from_iter_full(skb_put(*frag, count), count,
2175 				   &msg->msg_iter))
2176 			return -EFAULT;
2177 
2178 		sent += count;
2179 		len  -= count;
2180 
2181 		skb->len += (*frag)->len;
2182 		skb->data_len += (*frag)->len;
2183 
2184 		frag = &(*frag)->next;
2185 	}
2186 
2187 	return sent;
2188 }
2189 
2190 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2191 						 struct msghdr *msg, size_t len)
2192 {
2193 	struct l2cap_conn *conn = chan->conn;
2194 	struct sk_buff *skb;
2195 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2196 	struct l2cap_hdr *lh;
2197 
2198 	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2199 	       __le16_to_cpu(chan->psm), len);
2200 
2201 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2202 
2203 	skb = chan->ops->alloc_skb(chan, hlen, count,
2204 				   msg->msg_flags & MSG_DONTWAIT);
2205 	if (IS_ERR(skb))
2206 		return skb;
2207 
2208 	/* Create L2CAP header */
2209 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2210 	lh->cid = cpu_to_le16(chan->dcid);
2211 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2212 	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2213 
2214 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2215 	if (unlikely(err < 0)) {
2216 		kfree_skb(skb);
2217 		return ERR_PTR(err);
2218 	}
2219 	return skb;
2220 }
2221 
2222 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2223 					      struct msghdr *msg, size_t len)
2224 {
2225 	struct l2cap_conn *conn = chan->conn;
2226 	struct sk_buff *skb;
2227 	int err, count;
2228 	struct l2cap_hdr *lh;
2229 
2230 	BT_DBG("chan %p len %zu", chan, len);
2231 
2232 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2233 
2234 	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2235 				   msg->msg_flags & MSG_DONTWAIT);
2236 	if (IS_ERR(skb))
2237 		return skb;
2238 
2239 	/* Create L2CAP header */
2240 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2241 	lh->cid = cpu_to_le16(chan->dcid);
2242 	lh->len = cpu_to_le16(len);
2243 
2244 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2245 	if (unlikely(err < 0)) {
2246 		kfree_skb(skb);
2247 		return ERR_PTR(err);
2248 	}
2249 	return skb;
2250 }
2251 
2252 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2253 					       struct msghdr *msg, size_t len,
2254 					       u16 sdulen)
2255 {
2256 	struct l2cap_conn *conn = chan->conn;
2257 	struct sk_buff *skb;
2258 	int err, count, hlen;
2259 	struct l2cap_hdr *lh;
2260 
2261 	BT_DBG("chan %p len %zu", chan, len);
2262 
2263 	if (!conn)
2264 		return ERR_PTR(-ENOTCONN);
2265 
2266 	hlen = __ertm_hdr_size(chan);
2267 
2268 	if (sdulen)
2269 		hlen += L2CAP_SDULEN_SIZE;
2270 
2271 	if (chan->fcs == L2CAP_FCS_CRC16)
2272 		hlen += L2CAP_FCS_SIZE;
2273 
2274 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2275 
2276 	skb = chan->ops->alloc_skb(chan, hlen, count,
2277 				   msg->msg_flags & MSG_DONTWAIT);
2278 	if (IS_ERR(skb))
2279 		return skb;
2280 
2281 	/* Create L2CAP header */
2282 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2283 	lh->cid = cpu_to_le16(chan->dcid);
2284 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2285 
2286 	/* Control header is populated later */
2287 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2288 		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2289 	else
2290 		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2291 
2292 	if (sdulen)
2293 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2294 
2295 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2296 	if (unlikely(err < 0)) {
2297 		kfree_skb(skb);
2298 		return ERR_PTR(err);
2299 	}
2300 
2301 	bt_cb(skb)->l2cap.fcs = chan->fcs;
2302 	bt_cb(skb)->l2cap.retries = 0;
2303 	return skb;
2304 }
2305 
2306 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2307 			     struct sk_buff_head *seg_queue,
2308 			     struct msghdr *msg, size_t len)
2309 {
2310 	struct sk_buff *skb;
2311 	u16 sdu_len;
2312 	size_t pdu_len;
2313 	u8 sar;
2314 
2315 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2316 
2317 	/* It is critical that ERTM PDUs fit in a single HCI fragment,
2318 	 * so fragmented skbs are not used.  The HCI layer's handling
2319 	 * of fragmented skbs is not compatible with ERTM's queueing.
2320 	 */
2321 
2322 	/* PDU size is derived from the HCI MTU */
2323 	pdu_len = chan->conn->mtu;
2324 
2325 	/* Constrain PDU size for BR/EDR connections */
2326 	if (!chan->hs_hcon)
2327 		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2328 
2329 	/* Adjust for largest possible L2CAP overhead. */
2330 	if (chan->fcs)
2331 		pdu_len -= L2CAP_FCS_SIZE;
2332 
2333 	pdu_len -= __ertm_hdr_size(chan);
2334 
2335 	/* Remote device may have requested smaller PDUs */
2336 	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
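	/* Net effect (illustrative summary): pdu_len starts at the HCI MTU,
	 * is capped to L2CAP_BREDR_MAX_PAYLOAD on BR/EDR, loses room for the
	 * FCS and the ERTM header overhead, and is finally bounded by the
	 * remote MPS.  An SDU no larger than that goes out unsegmented;
	 * bigger SDUs become START/CONTINUE/END segments of at most pdu_len
	 * bytes each.
	 */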
2337 
2338 	if (len <= pdu_len) {
2339 		sar = L2CAP_SAR_UNSEGMENTED;
2340 		sdu_len = 0;
2341 		pdu_len = len;
2342 	} else {
2343 		sar = L2CAP_SAR_START;
2344 		sdu_len = len;
2345 	}
2346 
2347 	while (len > 0) {
2348 		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2349 
2350 		if (IS_ERR(skb)) {
2351 			__skb_queue_purge(seg_queue);
2352 			return PTR_ERR(skb);
2353 		}
2354 
2355 		bt_cb(skb)->l2cap.sar = sar;
2356 		__skb_queue_tail(seg_queue, skb);
2357 
2358 		len -= pdu_len;
2359 		if (sdu_len)
2360 			sdu_len = 0;
2361 
2362 		if (len <= pdu_len) {
2363 			sar = L2CAP_SAR_END;
2364 			pdu_len = len;
2365 		} else {
2366 			sar = L2CAP_SAR_CONTINUE;
2367 		}
2368 	}
2369 
2370 	return 0;
2371 }
2372 
2373 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2374 						   struct msghdr *msg,
2375 						   size_t len, u16 sdulen)
2376 {
2377 	struct l2cap_conn *conn = chan->conn;
2378 	struct sk_buff *skb;
2379 	int err, count, hlen;
2380 	struct l2cap_hdr *lh;
2381 
2382 	BT_DBG("chan %p len %zu", chan, len);
2383 
2384 	if (!conn)
2385 		return ERR_PTR(-ENOTCONN);
2386 
2387 	hlen = L2CAP_HDR_SIZE;
2388 
2389 	if (sdulen)
2390 		hlen += L2CAP_SDULEN_SIZE;
2391 
2392 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2393 
2394 	skb = chan->ops->alloc_skb(chan, hlen, count,
2395 				   msg->msg_flags & MSG_DONTWAIT);
2396 	if (IS_ERR(skb))
2397 		return skb;
2398 
2399 	/* Create L2CAP header */
2400 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2401 	lh->cid = cpu_to_le16(chan->dcid);
2402 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2403 
2404 	if (sdulen)
2405 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2406 
2407 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2408 	if (unlikely(err < 0)) {
2409 		kfree_skb(skb);
2410 		return ERR_PTR(err);
2411 	}
2412 
2413 	return skb;
2414 }
2415 
2416 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2417 				struct sk_buff_head *seg_queue,
2418 				struct msghdr *msg, size_t len)
2419 {
2420 	struct sk_buff *skb;
2421 	size_t pdu_len;
2422 	u16 sdu_len;
2423 
2424 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2425 
2426 	sdu_len = len;
2427 	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
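	/* Illustrative note: only the first PDU carries the 2-byte SDU length
	 * field, so it has L2CAP_SDULEN_SIZE less payload room; once it has
	 * been queued, pdu_len grows back by that amount for the remaining
	 * PDUs.
	 */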
2428 
2429 	while (len > 0) {
2430 		if (len <= pdu_len)
2431 			pdu_len = len;
2432 
2433 		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2434 		if (IS_ERR(skb)) {
2435 			__skb_queue_purge(seg_queue);
2436 			return PTR_ERR(skb);
2437 		}
2438 
2439 		__skb_queue_tail(seg_queue, skb);
2440 
2441 		len -= pdu_len;
2442 
2443 		if (sdu_len) {
2444 			sdu_len = 0;
2445 			pdu_len += L2CAP_SDULEN_SIZE;
2446 		}
2447 	}
2448 
2449 	return 0;
2450 }
2451 
2452 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2453 {
2454 	int sent = 0;
2455 
2456 	BT_DBG("chan %p", chan);
2457 
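	/* Illustrative note: LE credit-based flow control spends one credit
	 * per PDU, so transmission stalls once tx_credits reaches zero and
	 * resumes only when the peer returns credits.
	 */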
2458 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2459 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2460 		chan->tx_credits--;
2461 		sent++;
2462 	}
2463 
2464 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2465 	       skb_queue_len(&chan->tx_q));
2466 }
2467 
2468 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2469 {
2470 	struct sk_buff *skb;
2471 	int err;
2472 	struct sk_buff_head seg_queue;
2473 
2474 	if (!chan->conn)
2475 		return -ENOTCONN;
2476 
2477 	/* Connectionless channel */
2478 	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2479 		skb = l2cap_create_connless_pdu(chan, msg, len);
2480 		if (IS_ERR(skb))
2481 			return PTR_ERR(skb);
2482 
2483 		/* The channel lock is released before requesting a new skb and
2484 		 * then reacquired, so we need to recheck the channel state.
2485 		 */
2486 		if (chan->state != BT_CONNECTED) {
2487 			kfree_skb(skb);
2488 			return -ENOTCONN;
2489 		}
2490 
2491 		l2cap_do_send(chan, skb);
2492 		return len;
2493 	}
2494 
2495 	switch (chan->mode) {
2496 	case L2CAP_MODE_LE_FLOWCTL:
2497 		/* Check outgoing MTU */
2498 		if (len > chan->omtu)
2499 			return -EMSGSIZE;
2500 
2501 		__skb_queue_head_init(&seg_queue);
2502 
2503 		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2504 
2505 		if (chan->state != BT_CONNECTED) {
2506 			__skb_queue_purge(&seg_queue);
2507 			err = -ENOTCONN;
2508 		}
2509 
2510 		if (err)
2511 			return err;
2512 
2513 		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2514 
2515 		l2cap_le_flowctl_send(chan);
2516 
2517 		if (!chan->tx_credits)
2518 			chan->ops->suspend(chan);
2519 
2520 		err = len;
2521 
2522 		break;
2523 
2524 	case L2CAP_MODE_BASIC:
2525 		/* Check outgoing MTU */
2526 		if (len > chan->omtu)
2527 			return -EMSGSIZE;
2528 
2529 		/* Create a basic PDU */
2530 		skb = l2cap_create_basic_pdu(chan, msg, len);
2531 		if (IS_ERR(skb))
2532 			return PTR_ERR(skb);
2533 
2534 		/* The channel lock is released before requesting a new skb and
2535 		 * then reacquired, so we need to recheck the channel state.
2536 		 */
2537 		if (chan->state != BT_CONNECTED) {
2538 			kfree_skb(skb);
2539 			return -ENOTCONN;
2540 		}
2541 
2542 		l2cap_do_send(chan, skb);
2543 		err = len;
2544 		break;
2545 
2546 	case L2CAP_MODE_ERTM:
2547 	case L2CAP_MODE_STREAMING:
2548 		/* Check outgoing MTU */
2549 		if (len > chan->omtu) {
2550 			err = -EMSGSIZE;
2551 			break;
2552 		}
2553 
2554 		__skb_queue_head_init(&seg_queue);
2555 
2556 		/* Do segmentation before calling in to the state machine,
2557 		 * since it's possible to block while waiting for memory
2558 		 * allocation.
2559 		 */
2560 		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2561 
2562 		/* The channel could have been closed while segmenting,
2563 		 * check that it is still connected.
2564 		 */
2565 		if (chan->state != BT_CONNECTED) {
2566 			__skb_queue_purge(&seg_queue);
2567 			err = -ENOTCONN;
2568 		}
2569 
2570 		if (err)
2571 			break;
2572 
2573 		if (chan->mode == L2CAP_MODE_ERTM)
2574 			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2575 		else
2576 			l2cap_streaming_send(chan, &seg_queue);
2577 
2578 		err = len;
2579 
2580 		/* If the skbs were not queued for sending, they'll still be in
2581 		 * seg_queue and need to be purged.
2582 		 */
2583 		__skb_queue_purge(&seg_queue);
2584 		break;
2585 
2586 	default:
2587 		BT_DBG("bad state %1.1x", chan->mode);
2588 		err = -EBADFD;
2589 	}
2590 
2591 	return err;
2592 }
2593 EXPORT_SYMBOL_GPL(l2cap_chan_send);
2594 
2595 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2596 {
2597 	struct l2cap_ctrl control;
2598 	u16 seq;
2599 
2600 	BT_DBG("chan %p, txseq %u", chan, txseq);
2601 
2602 	memset(&control, 0, sizeof(control));
2603 	control.sframe = 1;
2604 	control.super = L2CAP_SUPER_SREJ;
2605 
2606 	for (seq = chan->expected_tx_seq; seq != txseq;
2607 	     seq = __next_seq(chan, seq)) {
2608 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2609 			control.reqseq = seq;
2610 			l2cap_send_sframe(chan, &control);
2611 			l2cap_seq_list_append(&chan->srej_list, seq);
2612 		}
2613 	}
2614 
2615 	chan->expected_tx_seq = __next_seq(chan, txseq);
2616 }
2617 
2618 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2619 {
2620 	struct l2cap_ctrl control;
2621 
2622 	BT_DBG("chan %p", chan);
2623 
2624 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2625 		return;
2626 
2627 	memset(&control, 0, sizeof(control));
2628 	control.sframe = 1;
2629 	control.super = L2CAP_SUPER_SREJ;
2630 	control.reqseq = chan->srej_list.tail;
2631 	l2cap_send_sframe(chan, &control);
2632 }
2633 
2634 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2635 {
2636 	struct l2cap_ctrl control;
2637 	u16 initial_head;
2638 	u16 seq;
2639 
2640 	BT_DBG("chan %p, txseq %u", chan, txseq);
2641 
2642 	memset(&control, 0, sizeof(control));
2643 	control.sframe = 1;
2644 	control.super = L2CAP_SUPER_SREJ;
2645 
2646 	/* Capture initial list head to allow only one pass through the list. */
2647 	initial_head = chan->srej_list.head;
2648 
2649 	do {
2650 		seq = l2cap_seq_list_pop(&chan->srej_list);
2651 		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2652 			break;
2653 
2654 		control.reqseq = seq;
2655 		l2cap_send_sframe(chan, &control);
2656 		l2cap_seq_list_append(&chan->srej_list, seq);
2657 	} while (chan->srej_list.head != initial_head);
2658 }
2659 
2660 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2661 {
2662 	struct sk_buff *acked_skb;
2663 	u16 ackseq;
2664 
2665 	BT_DBG("chan %p, reqseq %u", chan, reqseq);
2666 
2667 	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2668 		return;
2669 
2670 	BT_DBG("expected_ack_seq %u, unacked_frames %u",
2671 	       chan->expected_ack_seq, chan->unacked_frames);
2672 
2673 	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2674 	     ackseq = __next_seq(chan, ackseq)) {
2675 
2676 		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2677 		if (acked_skb) {
2678 			skb_unlink(acked_skb, &chan->tx_q);
2679 			kfree_skb(acked_skb);
2680 			chan->unacked_frames--;
2681 		}
2682 	}
2683 
2684 	chan->expected_ack_seq = reqseq;
2685 
2686 	if (chan->unacked_frames == 0)
2687 		__clear_retrans_timer(chan);
2688 
2689 	BT_DBG("unacked_frames %u", chan->unacked_frames);
2690 }
2691 
2692 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2693 {
2694 	BT_DBG("chan %p", chan);
2695 
2696 	chan->expected_tx_seq = chan->buffer_seq;
2697 	l2cap_seq_list_clear(&chan->srej_list);
2698 	skb_queue_purge(&chan->srej_q);
2699 	chan->rx_state = L2CAP_RX_STATE_RECV;
2700 }
2701 
2702 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2703 				struct l2cap_ctrl *control,
2704 				struct sk_buff_head *skbs, u8 event)
2705 {
2706 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2707 	       event);
2708 
2709 	switch (event) {
2710 	case L2CAP_EV_DATA_REQUEST:
2711 		if (chan->tx_send_head == NULL)
2712 			chan->tx_send_head = skb_peek(skbs);
2713 
2714 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2715 		l2cap_ertm_send(chan);
2716 		break;
2717 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2718 		BT_DBG("Enter LOCAL_BUSY");
2719 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2720 
2721 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2722 			/* The SREJ_SENT state must be aborted if we are to
2723 			 * enter the LOCAL_BUSY state.
2724 			 */
2725 			l2cap_abort_rx_srej_sent(chan);
2726 		}
2727 
2728 		l2cap_send_ack(chan);
2729 
2730 		break;
2731 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2732 		BT_DBG("Exit LOCAL_BUSY");
2733 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2734 
2735 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2736 			struct l2cap_ctrl local_control;
2737 
2738 			memset(&local_control, 0, sizeof(local_control));
2739 			local_control.sframe = 1;
2740 			local_control.super = L2CAP_SUPER_RR;
2741 			local_control.poll = 1;
2742 			local_control.reqseq = chan->buffer_seq;
2743 			l2cap_send_sframe(chan, &local_control);
2744 
2745 			chan->retry_count = 1;
2746 			__set_monitor_timer(chan);
2747 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2748 		}
2749 		break;
2750 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2751 		l2cap_process_reqseq(chan, control->reqseq);
2752 		break;
2753 	case L2CAP_EV_EXPLICIT_POLL:
2754 		l2cap_send_rr_or_rnr(chan, 1);
2755 		chan->retry_count = 1;
2756 		__set_monitor_timer(chan);
2757 		__clear_ack_timer(chan);
2758 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2759 		break;
2760 	case L2CAP_EV_RETRANS_TO:
2761 		l2cap_send_rr_or_rnr(chan, 1);
2762 		chan->retry_count = 1;
2763 		__set_monitor_timer(chan);
2764 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2765 		break;
2766 	case L2CAP_EV_RECV_FBIT:
2767 		/* Nothing to process */
2768 		break;
2769 	default:
2770 		break;
2771 	}
2772 }
2773 
2774 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2775 				  struct l2cap_ctrl *control,
2776 				  struct sk_buff_head *skbs, u8 event)
2777 {
2778 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2779 	       event);
2780 
2781 	switch (event) {
2782 	case L2CAP_EV_DATA_REQUEST:
2783 		if (chan->tx_send_head == NULL)
2784 			chan->tx_send_head = skb_peek(skbs);
2785 		/* Queue data, but don't send. */
2786 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2787 		break;
2788 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2789 		BT_DBG("Enter LOCAL_BUSY");
2790 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2791 
2792 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2793 			/* The SREJ_SENT state must be aborted if we are to
2794 			 * enter the LOCAL_BUSY state.
2795 			 */
2796 			l2cap_abort_rx_srej_sent(chan);
2797 		}
2798 
2799 		l2cap_send_ack(chan);
2800 
2801 		break;
2802 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2803 		BT_DBG("Exit LOCAL_BUSY");
2804 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2805 
2806 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2807 			struct l2cap_ctrl local_control;
2808 			memset(&local_control, 0, sizeof(local_control));
2809 			local_control.sframe = 1;
2810 			local_control.super = L2CAP_SUPER_RR;
2811 			local_control.poll = 1;
2812 			local_control.reqseq = chan->buffer_seq;
2813 			l2cap_send_sframe(chan, &local_control);
2814 
2815 			chan->retry_count = 1;
2816 			__set_monitor_timer(chan);
2817 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2818 		}
2819 		break;
2820 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2821 		l2cap_process_reqseq(chan, control->reqseq);
2822 
2823 		/* Fall through */
2824 
2825 	case L2CAP_EV_RECV_FBIT:
2826 		if (control && control->final) {
2827 			__clear_monitor_timer(chan);
2828 			if (chan->unacked_frames > 0)
2829 				__set_retrans_timer(chan);
2830 			chan->retry_count = 0;
2831 			chan->tx_state = L2CAP_TX_STATE_XMIT;
2832 			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2833 		}
2834 		break;
2835 	case L2CAP_EV_EXPLICIT_POLL:
2836 		/* Ignore */
2837 		break;
2838 	case L2CAP_EV_MONITOR_TO:
2839 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2840 			l2cap_send_rr_or_rnr(chan, 1);
2841 			__set_monitor_timer(chan);
2842 			chan->retry_count++;
2843 		} else {
2844 			l2cap_send_disconn_req(chan, ECONNABORTED);
2845 		}
2846 		break;
2847 	default:
2848 		break;
2849 	}
2850 }
2851 
2852 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2853 		     struct sk_buff_head *skbs, u8 event)
2854 {
2855 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2856 	       chan, control, skbs, event, chan->tx_state);
2857 
2858 	switch (chan->tx_state) {
2859 	case L2CAP_TX_STATE_XMIT:
2860 		l2cap_tx_state_xmit(chan, control, skbs, event);
2861 		break;
2862 	case L2CAP_TX_STATE_WAIT_F:
2863 		l2cap_tx_state_wait_f(chan, control, skbs, event);
2864 		break;
2865 	default:
2866 		/* Ignore event */
2867 		break;
2868 	}
2869 }
2870 
2871 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2872 			     struct l2cap_ctrl *control)
2873 {
2874 	BT_DBG("chan %p, control %p", chan, control);
2875 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2876 }
2877 
2878 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2879 				  struct l2cap_ctrl *control)
2880 {
2881 	BT_DBG("chan %p, control %p", chan, control);
2882 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2883 }
2884 
2885 /* Copy frame to all raw sockets on that connection */
2886 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2887 {
2888 	struct sk_buff *nskb;
2889 	struct l2cap_chan *chan;
2890 
2891 	BT_DBG("conn %p", conn);
2892 
2893 	mutex_lock(&conn->chan_lock);
2894 
2895 	list_for_each_entry(chan, &conn->chan_l, list) {
2896 		if (chan->chan_type != L2CAP_CHAN_RAW)
2897 			continue;
2898 
2899 		/* Don't send frame to the channel it came from */
2900 		if (bt_cb(skb)->l2cap.chan == chan)
2901 			continue;
2902 
2903 		nskb = skb_clone(skb, GFP_KERNEL);
2904 		if (!nskb)
2905 			continue;
2906 		if (chan->ops->recv(chan, nskb))
2907 			kfree_skb(nskb);
2908 	}
2909 
2910 	mutex_unlock(&conn->chan_lock);
2911 }
2912 
2913 /* ---- L2CAP signalling commands ---- */
2914 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2915 				       u8 ident, u16 dlen, void *data)
2916 {
2917 	struct sk_buff *skb, **frag;
2918 	struct l2cap_cmd_hdr *cmd;
2919 	struct l2cap_hdr *lh;
2920 	int len, count;
2921 
2922 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2923 	       conn, code, ident, dlen);
2924 
2925 	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2926 		return NULL;
2927 
2928 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2929 	count = min_t(unsigned int, conn->mtu, len);
2930 
2931 	skb = bt_skb_alloc(count, GFP_KERNEL);
2932 	if (!skb)
2933 		return NULL;
2934 
2935 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2936 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2937 
2938 	if (conn->hcon->type == LE_LINK)
2939 		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2940 	else
2941 		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2942 
2943 	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
2944 	cmd->code  = code;
2945 	cmd->ident = ident;
2946 	cmd->len   = cpu_to_le16(dlen);
2947 
2948 	if (dlen) {
2949 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2950 		skb_put_data(skb, data, count);
2951 		data += count;
2952 	}
2953 
2954 	len -= skb->len;
2955 
2956 	/* Continuation fragments (no L2CAP header) */
2957 	frag = &skb_shinfo(skb)->frag_list;
2958 	while (len) {
2959 		count = min_t(unsigned int, conn->mtu, len);
2960 
2961 		*frag = bt_skb_alloc(count, GFP_KERNEL);
2962 		if (!*frag)
2963 			goto fail;
2964 
2965 		skb_put_data(*frag, data, count);
2966 
2967 		len  -= count;
2968 		data += count;
2969 
2970 		frag = &(*frag)->next;
2971 	}
2972 
2973 	return skb;
2974 
2975 fail:
2976 	kfree_skb(skb);
2977 	return NULL;
2978 }
2979 
2980 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2981 				     unsigned long *val)
2982 {
2983 	struct l2cap_conf_opt *opt = *ptr;
2984 	int len;
2985 
2986 	len = L2CAP_CONF_OPT_SIZE + opt->len;
2987 	*ptr += len;
2988 
2989 	*type = opt->type;
2990 	*olen = opt->len;
2991 
2992 	switch (opt->len) {
2993 	case 1:
2994 		*val = *((u8 *) opt->val);
2995 		break;
2996 
2997 	case 2:
2998 		*val = get_unaligned_le16(opt->val);
2999 		break;
3000 
3001 	case 4:
3002 		*val = get_unaligned_le32(opt->val);
3003 		break;
3004 
3005 	default:
3006 		*val = (unsigned long) opt->val;
3007 		break;
3008 	}
3009 
3010 	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
3011 	return len;
3012 }
3013 
3014 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3015 {
3016 	struct l2cap_conf_opt *opt = *ptr;
3017 
3018 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3019 
3020 	if (size < L2CAP_CONF_OPT_SIZE + len)
3021 		return;
3022 
3023 	opt->type = type;
3024 	opt->len  = len;
3025 
3026 	switch (len) {
3027 	case 1:
3028 		*((u8 *) opt->val)  = val;
3029 		break;
3030 
3031 	case 2:
3032 		put_unaligned_le16(val, opt->val);
3033 		break;
3034 
3035 	case 4:
3036 		put_unaligned_le32(val, opt->val);
3037 		break;
3038 
3039 	default:
3040 		memcpy(opt->val, (void *) val, len);
3041 		break;
3042 	}
3043 
3044 	*ptr += L2CAP_CONF_OPT_SIZE + len;
3045 }
3046 
3047 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3048 {
3049 	struct l2cap_conf_efs efs;
3050 
3051 	switch (chan->mode) {
3052 	case L2CAP_MODE_ERTM:
3053 		efs.id		= chan->local_id;
3054 		efs.stype	= chan->local_stype;
3055 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3056 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3057 		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3058 		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3059 		break;
3060 
3061 	case L2CAP_MODE_STREAMING:
3062 		efs.id		= 1;
3063 		efs.stype	= L2CAP_SERV_BESTEFFORT;
3064 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3065 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3066 		efs.acc_lat	= 0;
3067 		efs.flush_to	= 0;
3068 		break;
3069 
3070 	default:
3071 		return;
3072 	}
3073 
3074 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3075 			   (unsigned long) &efs, size);
3076 }
3077 
3078 static void l2cap_ack_timeout(struct work_struct *work)
3079 {
3080 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3081 					       ack_timer.work);
3082 	u16 frames_to_ack;
3083 
3084 	BT_DBG("chan %p", chan);
3085 
3086 	l2cap_chan_lock(chan);
3087 
3088 	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3089 				     chan->last_acked_seq);
3090 
3091 	if (frames_to_ack)
3092 		l2cap_send_rr_or_rnr(chan, 0);
3093 
3094 	l2cap_chan_unlock(chan);
3095 	l2cap_chan_put(chan);
3096 }
3097 
3098 int l2cap_ertm_init(struct l2cap_chan *chan)
3099 {
3100 	int err;
3101 
3102 	chan->next_tx_seq = 0;
3103 	chan->expected_tx_seq = 0;
3104 	chan->expected_ack_seq = 0;
3105 	chan->unacked_frames = 0;
3106 	chan->buffer_seq = 0;
3107 	chan->frames_sent = 0;
3108 	chan->last_acked_seq = 0;
3109 	chan->sdu = NULL;
3110 	chan->sdu_last_frag = NULL;
3111 	chan->sdu_len = 0;
3112 
3113 	skb_queue_head_init(&chan->tx_q);
3114 
3115 	chan->local_amp_id = AMP_ID_BREDR;
3116 	chan->move_id = AMP_ID_BREDR;
3117 	chan->move_state = L2CAP_MOVE_STABLE;
3118 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
3119 
3120 	if (chan->mode != L2CAP_MODE_ERTM)
3121 		return 0;
3122 
3123 	chan->rx_state = L2CAP_RX_STATE_RECV;
3124 	chan->tx_state = L2CAP_TX_STATE_XMIT;
3125 
3126 	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3127 	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3128 	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3129 
3130 	skb_queue_head_init(&chan->srej_q);
3131 
3132 	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3133 	if (err < 0)
3134 		return err;
3135 
3136 	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3137 	if (err < 0)
3138 		l2cap_seq_list_free(&chan->srej_list);
3139 
3140 	return err;
3141 }
3142 
3143 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3144 {
3145 	switch (mode) {
3146 	case L2CAP_MODE_STREAMING:
3147 	case L2CAP_MODE_ERTM:
3148 		if (l2cap_mode_supported(mode, remote_feat_mask))
3149 			return mode;
3150 		/* fall through */
3151 	default:
3152 		return L2CAP_MODE_BASIC;
3153 	}
3154 }
3155 
3156 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3157 {
3158 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3159 		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3160 }
3161 
3162 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3163 {
3164 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3165 		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3166 }
3167 
3168 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3169 				      struct l2cap_conf_rfc *rfc)
3170 {
3171 	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3172 		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3173 
3174 		/* Class 1 devices must have ERTM timeouts
3175 		 * exceeding the Link Supervision Timeout.  The
3176 		 * default Link Supervision Timeout for AMP
3177 		 * controllers is 10 seconds.
3178 		 *
3179 		 * Class 1 devices use 0xffffffff for their
3180 		 * best-effort flush timeout, so the clamping logic
3181 		 * will result in a timeout that meets the above
3182 		 * requirement.  ERTM timeouts are 16-bit values, so
3183 		 * the maximum timeout is 65.535 seconds.
3184 		 */
3185 
3186 		/* Convert timeout to milliseconds and round */
3187 		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3188 
3189 		/* This is the recommended formula for class 2 devices
3190 		 * that start ERTM timers when packets are sent to the
3191 		 * controller.
3192 		 */
3193 		ertm_to = 3 * ertm_to + 500;
3194 
3195 		if (ertm_to > 0xffff)
3196 			ertm_to = 0xffff;
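		/* Worked example (illustrative; values assumed): a best-effort
		 * flush timeout of 100000 us converts to 100 ms and yields
		 * 3 * 100 + 500 = 800 ms for both timeouts, while the Class 1
		 * value of 0xffffffff overflows the conversion and is clamped
		 * to the 16-bit maximum of 0xffff ms (65.535 seconds).
		 */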
3197 
3198 		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3199 		rfc->monitor_timeout = rfc->retrans_timeout;
3200 	} else {
3201 		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3202 		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3203 	}
3204 }
3205 
3206 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3207 {
3208 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3209 	    __l2cap_ews_supported(chan->conn)) {
3210 		/* use extended control field */
3211 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3212 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3213 	} else {
3214 		chan->tx_win = min_t(u16, chan->tx_win,
3215 				     L2CAP_DEFAULT_TX_WINDOW);
3216 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3217 	}
3218 	chan->ack_win = chan->tx_win;
3219 }
3220 
3221 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3222 {
3223 	struct l2cap_conf_req *req = data;
3224 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3225 	void *ptr = req->data;
3226 	void *endptr = data + data_size;
3227 	u16 size;
3228 
3229 	BT_DBG("chan %p", chan);
3230 
3231 	if (chan->num_conf_req || chan->num_conf_rsp)
3232 		goto done;
3233 
3234 	switch (chan->mode) {
3235 	case L2CAP_MODE_STREAMING:
3236 	case L2CAP_MODE_ERTM:
3237 		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3238 			break;
3239 
3240 		if (__l2cap_efs_supported(chan->conn))
3241 			set_bit(FLAG_EFS_ENABLE, &chan->flags);
3242 
3243 		/* fall through */
3244 	default:
3245 		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3246 		break;
3247 	}
3248 
3249 done:
3250 	if (chan->imtu != L2CAP_DEFAULT_MTU)
3251 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
3252 
3253 	switch (chan->mode) {
3254 	case L2CAP_MODE_BASIC:
3255 		if (disable_ertm)
3256 			break;
3257 
3258 		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3259 		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3260 			break;
3261 
3262 		rfc.mode            = L2CAP_MODE_BASIC;
3263 		rfc.txwin_size      = 0;
3264 		rfc.max_transmit    = 0;
3265 		rfc.retrans_timeout = 0;
3266 		rfc.monitor_timeout = 0;
3267 		rfc.max_pdu_size    = 0;
3268 
3269 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3270 				   (unsigned long) &rfc, endptr - ptr);
3271 		break;
3272 
3273 	case L2CAP_MODE_ERTM:
3274 		rfc.mode            = L2CAP_MODE_ERTM;
3275 		rfc.max_transmit    = chan->max_tx;
3276 
3277 		__l2cap_set_ertm_timeouts(chan, &rfc);
3278 
3279 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3280 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3281 			     L2CAP_FCS_SIZE);
3282 		rfc.max_pdu_size = cpu_to_le16(size);
3283 
3284 		l2cap_txwin_setup(chan);
3285 
3286 		rfc.txwin_size = min_t(u16, chan->tx_win,
3287 				       L2CAP_DEFAULT_TX_WINDOW);
3288 
3289 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3290 				   (unsigned long) &rfc, endptr - ptr);
3291 
3292 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3293 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3294 
3295 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3296 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3297 					   chan->tx_win, endptr - ptr);
3298 
3299 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3300 			if (chan->fcs == L2CAP_FCS_NONE ||
3301 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3302 				chan->fcs = L2CAP_FCS_NONE;
3303 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3304 						   chan->fcs, endptr - ptr);
3305 			}
3306 		break;
3307 
3308 	case L2CAP_MODE_STREAMING:
3309 		l2cap_txwin_setup(chan);
3310 		rfc.mode            = L2CAP_MODE_STREAMING;
3311 		rfc.txwin_size      = 0;
3312 		rfc.max_transmit    = 0;
3313 		rfc.retrans_timeout = 0;
3314 		rfc.monitor_timeout = 0;
3315 
3316 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3317 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3318 			     L2CAP_FCS_SIZE);
3319 		rfc.max_pdu_size = cpu_to_le16(size);
3320 
3321 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3322 				   (unsigned long) &rfc, endptr - ptr);
3323 
3324 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3325 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3326 
3327 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3328 			if (chan->fcs == L2CAP_FCS_NONE ||
3329 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3330 				chan->fcs = L2CAP_FCS_NONE;
3331 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3332 						   chan->fcs, endptr - ptr);
3333 			}
3334 		break;
3335 	}
3336 
3337 	req->dcid  = cpu_to_le16(chan->dcid);
3338 	req->flags = cpu_to_le16(0);
3339 
3340 	return ptr - data;
3341 }
3342 
3343 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3344 {
3345 	struct l2cap_conf_rsp *rsp = data;
3346 	void *ptr = rsp->data;
3347 	void *endptr = data + data_size;
3348 	void *req = chan->conf_req;
3349 	int len = chan->conf_len;
3350 	int type, hint, olen;
3351 	unsigned long val;
3352 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3353 	struct l2cap_conf_efs efs;
3354 	u8 remote_efs = 0;
3355 	u16 mtu = L2CAP_DEFAULT_MTU;
3356 	u16 result = L2CAP_CONF_SUCCESS;
3357 	u16 size;
3358 
3359 	BT_DBG("chan %p", chan);
3360 
3361 	while (len >= L2CAP_CONF_OPT_SIZE) {
3362 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3363 		if (len < 0)
3364 			break;
3365 
3366 		hint  = type & L2CAP_CONF_HINT;
3367 		type &= L2CAP_CONF_MASK;
3368 
3369 		switch (type) {
3370 		case L2CAP_CONF_MTU:
3371 			if (olen != 2)
3372 				break;
3373 			mtu = val;
3374 			break;
3375 
3376 		case L2CAP_CONF_FLUSH_TO:
3377 			if (olen != 2)
3378 				break;
3379 			chan->flush_to = val;
3380 			break;
3381 
3382 		case L2CAP_CONF_QOS:
3383 			break;
3384 
3385 		case L2CAP_CONF_RFC:
3386 			if (olen != sizeof(rfc))
3387 				break;
3388 			memcpy(&rfc, (void *) val, olen);
3389 			break;
3390 
3391 		case L2CAP_CONF_FCS:
3392 			if (olen != 1)
3393 				break;
3394 			if (val == L2CAP_FCS_NONE)
3395 				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3396 			break;
3397 
3398 		case L2CAP_CONF_EFS:
3399 			if (olen != sizeof(efs))
3400 				break;
3401 			remote_efs = 1;
3402 			memcpy(&efs, (void *) val, olen);
3403 			break;
3404 
3405 		case L2CAP_CONF_EWS:
3406 			if (olen != 2)
3407 				break;
3408 			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3409 				return -ECONNREFUSED;
3410 			set_bit(FLAG_EXT_CTRL, &chan->flags);
3411 			set_bit(CONF_EWS_RECV, &chan->conf_state);
3412 			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3413 			chan->remote_tx_win = val;
3414 			break;
3415 
3416 		default:
3417 			if (hint)
3418 				break;
3419 			result = L2CAP_CONF_UNKNOWN;
3420 			*((u8 *) ptr++) = type;
3421 			break;
3422 		}
3423 	}
3424 
3425 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
3426 		goto done;
3427 
3428 	switch (chan->mode) {
3429 	case L2CAP_MODE_STREAMING:
3430 	case L2CAP_MODE_ERTM:
3431 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3432 			chan->mode = l2cap_select_mode(rfc.mode,
3433 						       chan->conn->feat_mask);
3434 			break;
3435 		}
3436 
3437 		if (remote_efs) {
3438 			if (__l2cap_efs_supported(chan->conn))
3439 				set_bit(FLAG_EFS_ENABLE, &chan->flags);
3440 			else
3441 				return -ECONNREFUSED;
3442 		}
3443 
3444 		if (chan->mode != rfc.mode)
3445 			return -ECONNREFUSED;
3446 
3447 		break;
3448 	}
3449 
3450 done:
3451 	if (chan->mode != rfc.mode) {
3452 		result = L2CAP_CONF_UNACCEPT;
3453 		rfc.mode = chan->mode;
3454 
3455 		if (chan->num_conf_rsp == 1)
3456 			return -ECONNREFUSED;
3457 
3458 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3459 				   (unsigned long) &rfc, endptr - ptr);
3460 	}
3461 
3462 	if (result == L2CAP_CONF_SUCCESS) {
3463 		/* Configure output options and let the other side know
3464 		 * which ones we don't like. */
3465 
3466 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
3467 			result = L2CAP_CONF_UNACCEPT;
3468 		else {
3469 			chan->omtu = mtu;
3470 			set_bit(CONF_MTU_DONE, &chan->conf_state);
3471 		}
3472 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
3473 
3474 		if (remote_efs) {
3475 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3476 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3477 			    efs.stype != chan->local_stype) {
3478 
3479 				result = L2CAP_CONF_UNACCEPT;
3480 
3481 				if (chan->num_conf_req >= 1)
3482 					return -ECONNREFUSED;
3483 
3484 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3485 						   sizeof(efs),
3486 						   (unsigned long) &efs, endptr - ptr);
3487 			} else {
3488 				/* Send PENDING Conf Rsp */
3489 				result = L2CAP_CONF_PENDING;
3490 				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3491 			}
3492 		}
3493 
3494 		switch (rfc.mode) {
3495 		case L2CAP_MODE_BASIC:
3496 			chan->fcs = L2CAP_FCS_NONE;
3497 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3498 			break;
3499 
3500 		case L2CAP_MODE_ERTM:
3501 			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3502 				chan->remote_tx_win = rfc.txwin_size;
3503 			else
3504 				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3505 
3506 			chan->remote_max_tx = rfc.max_transmit;
3507 
3508 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3509 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3510 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3511 			rfc.max_pdu_size = cpu_to_le16(size);
3512 			chan->remote_mps = size;
3513 
3514 			__l2cap_set_ertm_timeouts(chan, &rfc);
3515 
3516 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3517 
3518 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3519 					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3520 
3521 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3522 				chan->remote_id = efs.id;
3523 				chan->remote_stype = efs.stype;
3524 				chan->remote_msdu = le16_to_cpu(efs.msdu);
3525 				chan->remote_flush_to =
3526 					le32_to_cpu(efs.flush_to);
3527 				chan->remote_acc_lat =
3528 					le32_to_cpu(efs.acc_lat);
3529 				chan->remote_sdu_itime =
3530 					le32_to_cpu(efs.sdu_itime);
3531 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3532 						   sizeof(efs),
3533 						   (unsigned long) &efs, endptr - ptr);
3534 			}
3535 			break;
3536 
3537 		case L2CAP_MODE_STREAMING:
3538 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3539 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3540 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3541 			rfc.max_pdu_size = cpu_to_le16(size);
3542 			chan->remote_mps = size;
3543 
3544 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3545 
3546 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3547 					   (unsigned long) &rfc, endptr - ptr);
3548 
3549 			break;
3550 
3551 		default:
3552 			result = L2CAP_CONF_UNACCEPT;
3553 
3554 			memset(&rfc, 0, sizeof(rfc));
3555 			rfc.mode = chan->mode;
3556 		}
3557 
3558 		if (result == L2CAP_CONF_SUCCESS)
3559 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3560 	}
3561 	rsp->scid   = cpu_to_le16(chan->dcid);
3562 	rsp->result = cpu_to_le16(result);
3563 	rsp->flags  = cpu_to_le16(0);
3564 
3565 	return ptr - data;
3566 }
3567 
3568 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3569 				void *data, size_t size, u16 *result)
3570 {
3571 	struct l2cap_conf_req *req = data;
3572 	void *ptr = req->data;
3573 	void *endptr = data + size;
3574 	int type, olen;
3575 	unsigned long val;
3576 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3577 	struct l2cap_conf_efs efs;
3578 
3579 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3580 
3581 	while (len >= L2CAP_CONF_OPT_SIZE) {
3582 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3583 		if (len < 0)
3584 			break;
3585 
3586 		switch (type) {
3587 		case L2CAP_CONF_MTU:
3588 			if (olen != 2)
3589 				break;
3590 			if (val < L2CAP_DEFAULT_MIN_MTU) {
3591 				*result = L2CAP_CONF_UNACCEPT;
3592 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3593 			} else
3594 				chan->imtu = val;
3595 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3596 					   endptr - ptr);
3597 			break;
3598 
3599 		case L2CAP_CONF_FLUSH_TO:
3600 			if (olen != 2)
3601 				break;
3602 			chan->flush_to = val;
3603 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3604 					   chan->flush_to, endptr - ptr);
3605 			break;
3606 
3607 		case L2CAP_CONF_RFC:
3608 			if (olen != sizeof(rfc))
3609 				break;
3610 			memcpy(&rfc, (void *)val, olen);
3611 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3612 			    rfc.mode != chan->mode)
3613 				return -ECONNREFUSED;
3614 			chan->fcs = 0;
3615 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3616 					   (unsigned long) &rfc, endptr - ptr);
3617 			break;
3618 
3619 		case L2CAP_CONF_EWS:
3620 			if (olen != 2)
3621 				break;
3622 			chan->ack_win = min_t(u16, val, chan->ack_win);
3623 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3624 					   chan->tx_win, endptr - ptr);
3625 			break;
3626 
3627 		case L2CAP_CONF_EFS:
3628 			if (olen != sizeof(efs))
3629 				break;
3630 			memcpy(&efs, (void *)val, olen);
3631 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3632 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3633 			    efs.stype != chan->local_stype)
3634 				return -ECONNREFUSED;
3635 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3636 					   (unsigned long) &efs, endptr - ptr);
3637 			break;
3638 
3639 		case L2CAP_CONF_FCS:
3640 			if (olen != 1)
3641 				break;
3642 			if (*result == L2CAP_CONF_PENDING)
3643 				if (val == L2CAP_FCS_NONE)
3644 					set_bit(CONF_RECV_NO_FCS,
3645 						&chan->conf_state);
3646 			break;
3647 		}
3648 	}
3649 
3650 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3651 		return -ECONNREFUSED;
3652 
3653 	chan->mode = rfc.mode;
3654 
3655 	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3656 		switch (rfc.mode) {
3657 		case L2CAP_MODE_ERTM:
3658 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3659 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3660 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3661 			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3662 				chan->ack_win = min_t(u16, chan->ack_win,
3663 						      rfc.txwin_size);
3664 
3665 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3666 				chan->local_msdu = le16_to_cpu(efs.msdu);
3667 				chan->local_sdu_itime =
3668 					le32_to_cpu(efs.sdu_itime);
3669 				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3670 				chan->local_flush_to =
3671 					le32_to_cpu(efs.flush_to);
3672 			}
3673 			break;
3674 
3675 		case L2CAP_MODE_STREAMING:
3676 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3677 		}
3678 	}
3679 
3680 	req->dcid   = cpu_to_le16(chan->dcid);
3681 	req->flags  = cpu_to_le16(0);
3682 
3683 	return ptr - data;
3684 }
3685 
3686 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3687 				u16 result, u16 flags)
3688 {
3689 	struct l2cap_conf_rsp *rsp = data;
3690 	void *ptr = rsp->data;
3691 
3692 	BT_DBG("chan %p", chan);
3693 
3694 	rsp->scid   = cpu_to_le16(chan->dcid);
3695 	rsp->result = cpu_to_le16(result);
3696 	rsp->flags  = cpu_to_le16(flags);
3697 
3698 	return ptr - data;
3699 }
3700 
3701 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3702 {
3703 	struct l2cap_le_conn_rsp rsp;
3704 	struct l2cap_conn *conn = chan->conn;
3705 
3706 	BT_DBG("chan %p", chan);
3707 
3708 	rsp.dcid    = cpu_to_le16(chan->scid);
3709 	rsp.mtu     = cpu_to_le16(chan->imtu);
3710 	rsp.mps     = cpu_to_le16(chan->mps);
3711 	rsp.credits = cpu_to_le16(chan->rx_credits);
3712 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3713 
3714 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3715 		       &rsp);
3716 }
3717 
3718 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3719 {
3720 	struct l2cap_conn_rsp rsp;
3721 	struct l2cap_conn *conn = chan->conn;
3722 	u8 buf[128];
3723 	u8 rsp_code;
3724 
3725 	rsp.scid   = cpu_to_le16(chan->dcid);
3726 	rsp.dcid   = cpu_to_le16(chan->scid);
3727 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3728 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3729 
3730 	if (chan->hs_hcon)
3731 		rsp_code = L2CAP_CREATE_CHAN_RSP;
3732 	else
3733 		rsp_code = L2CAP_CONN_RSP;
3734 
3735 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3736 
3737 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3738 
3739 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3740 		return;
3741 
3742 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3743 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3744 	chan->num_conf_req++;
3745 }
3746 
3747 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3748 {
3749 	int type, olen;
3750 	unsigned long val;
3751 	/* Use sane default values in case a misbehaving remote device
3752 	 * did not send an RFC or extended window size option.
3753 	 */
3754 	u16 txwin_ext = chan->ack_win;
3755 	struct l2cap_conf_rfc rfc = {
3756 		.mode = chan->mode,
3757 		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3758 		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3759 		.max_pdu_size = cpu_to_le16(chan->imtu),
3760 		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3761 	};
3762 
3763 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3764 
3765 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3766 		return;
3767 
3768 	while (len >= L2CAP_CONF_OPT_SIZE) {
3769 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3770 		if (len < 0)
3771 			break;
3772 
3773 		switch (type) {
3774 		case L2CAP_CONF_RFC:
3775 			if (olen != sizeof(rfc))
3776 				break;
3777 			memcpy(&rfc, (void *)val, olen);
3778 			break;
3779 		case L2CAP_CONF_EWS:
3780 			if (olen != 2)
3781 				break;
3782 			txwin_ext = val;
3783 			break;
3784 		}
3785 	}
3786 
3787 	switch (rfc.mode) {
3788 	case L2CAP_MODE_ERTM:
3789 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3790 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3791 		chan->mps = le16_to_cpu(rfc.max_pdu_size);
3792 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3793 			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3794 		else
3795 			chan->ack_win = min_t(u16, chan->ack_win,
3796 					      rfc.txwin_size);
3797 		break;
3798 	case L2CAP_MODE_STREAMING:
3799 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3800 	}
3801 }
3802 
3803 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3804 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3805 				    u8 *data)
3806 {
3807 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3808 
3809 	if (cmd_len < sizeof(*rej))
3810 		return -EPROTO;
3811 
3812 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3813 		return 0;
3814 
3815 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3816 	    cmd->ident == conn->info_ident) {
3817 		cancel_delayed_work(&conn->info_timer);
3818 
3819 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3820 		conn->info_ident = 0;
3821 
3822 		l2cap_conn_start(conn);
3823 	}
3824 
3825 	return 0;
3826 }
3827 
3828 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3829 					struct l2cap_cmd_hdr *cmd,
3830 					u8 *data, u8 rsp_code, u8 amp_id)
3831 {
3832 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3833 	struct l2cap_conn_rsp rsp;
3834 	struct l2cap_chan *chan = NULL, *pchan;
3835 	int result, status = L2CAP_CS_NO_INFO;
3836 
3837 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3838 	__le16 psm = req->psm;
3839 
3840 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3841 
3842 	/* Check if we have a socket listening on the PSM */
3843 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3844 					 &conn->hcon->dst, ACL_LINK);
3845 	if (!pchan) {
3846 		result = L2CAP_CR_BAD_PSM;
3847 		goto sendresp;
3848 	}
3849 
3850 	mutex_lock(&conn->chan_lock);
3851 	l2cap_chan_lock(pchan);
3852 
3853 	/* Check if the ACL is secure enough (if not SDP) */
3854 	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
3855 	    !hci_conn_check_link_mode(conn->hcon)) {
3856 		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3857 		result = L2CAP_CR_SEC_BLOCK;
3858 		goto response;
3859 	}
3860 
3861 	result = L2CAP_CR_NO_MEM;
3862 
3863 	/* Check for valid dynamic CID range (as per Erratum 3253) */
3864 	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
3865 		result = L2CAP_CR_INVALID_SCID;
3866 		goto response;
3867 	}
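	/* Illustrative note: dynamically allocated BR/EDR CIDs lie in the
	 * L2CAP_CID_DYN_START..L2CAP_CID_DYN_END range (0x0040-0xffff), so a
	 * source CID outside that range is rejected by the check above.
	 */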
3868 
3869 	/* Check if we already have a channel with that dcid */
3870 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
3871 		result = L2CAP_CR_SCID_IN_USE;
3872 		goto response;
3873 	}
3874 
3875 	chan = pchan->ops->new_connection(pchan);
3876 	if (!chan)
3877 		goto response;
3878 
3879 	/* For certain devices (ex: HID mouse), support for authentication,
3880 	 * pairing and bonding is optional. For such devices, in order to avoid
3881 	 * keeping the ACL alive for too long after L2CAP disconnection, reset
3882 	 * the ACL disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3883 	 */
3884 	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3885 
3886 	bacpy(&chan->src, &conn->hcon->src);
3887 	bacpy(&chan->dst, &conn->hcon->dst);
3888 	chan->src_type = bdaddr_src_type(conn->hcon);
3889 	chan->dst_type = bdaddr_dst_type(conn->hcon);
3890 	chan->psm  = psm;
3891 	chan->dcid = scid;
3892 	chan->local_amp_id = amp_id;
3893 
3894 	__l2cap_chan_add(conn, chan);
3895 
3896 	dcid = chan->scid;
3897 
3898 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3899 
3900 	chan->ident = cmd->ident;
3901 
3902 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3903 		if (l2cap_chan_check_security(chan, false)) {
3904 			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3905 				l2cap_state_change(chan, BT_CONNECT2);
3906 				result = L2CAP_CR_PEND;
3907 				status = L2CAP_CS_AUTHOR_PEND;
3908 				chan->ops->defer(chan);
3909 			} else {
3910 				/* Force pending result for AMP controllers.
3911 				 * The connection will succeed after the
3912 				 * physical link is up.
3913 				 */
3914 				if (amp_id == AMP_ID_BREDR) {
3915 					l2cap_state_change(chan, BT_CONFIG);
3916 					result = L2CAP_CR_SUCCESS;
3917 				} else {
3918 					l2cap_state_change(chan, BT_CONNECT2);
3919 					result = L2CAP_CR_PEND;
3920 				}
3921 				status = L2CAP_CS_NO_INFO;
3922 			}
3923 		} else {
3924 			l2cap_state_change(chan, BT_CONNECT2);
3925 			result = L2CAP_CR_PEND;
3926 			status = L2CAP_CS_AUTHEN_PEND;
3927 		}
3928 	} else {
3929 		l2cap_state_change(chan, BT_CONNECT2);
3930 		result = L2CAP_CR_PEND;
3931 		status = L2CAP_CS_NO_INFO;
3932 	}
3933 
3934 response:
3935 	l2cap_chan_unlock(pchan);
3936 	mutex_unlock(&conn->chan_lock);
3937 	l2cap_chan_put(pchan);
3938 
3939 sendresp:
3940 	rsp.scid   = cpu_to_le16(scid);
3941 	rsp.dcid   = cpu_to_le16(dcid);
3942 	rsp.result = cpu_to_le16(result);
3943 	rsp.status = cpu_to_le16(status);
3944 	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3945 
3946 	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3947 		struct l2cap_info_req info;
3948 		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3949 
3950 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3951 		conn->info_ident = l2cap_get_ident(conn);
3952 
3953 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3954 
3955 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3956 			       sizeof(info), &info);
3957 	}
3958 
3959 	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3960 	    result == L2CAP_CR_SUCCESS) {
3961 		u8 buf[128];
3962 		set_bit(CONF_REQ_SENT, &chan->conf_state);
3963 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3964 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3965 		chan->num_conf_req++;
3966 	}
3967 
3968 	return chan;
3969 }
3970 
3971 static int l2cap_connect_req(struct l2cap_conn *conn,
3972 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3973 {
3974 	struct hci_dev *hdev = conn->hcon->hdev;
3975 	struct hci_conn *hcon = conn->hcon;
3976 
3977 	if (cmd_len < sizeof(struct l2cap_conn_req))
3978 		return -EPROTO;
3979 
3980 	hci_dev_lock(hdev);
3981 	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3982 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3983 		mgmt_device_connected(hdev, hcon, 0, NULL, 0);
3984 	hci_dev_unlock(hdev);
3985 
3986 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3987 	return 0;
3988 }
3989 
3990 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3991 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3992 				    u8 *data)
3993 {
3994 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3995 	u16 scid, dcid, result, status;
3996 	struct l2cap_chan *chan;
3997 	u8 req[128];
3998 	int err;
3999 
4000 	if (cmd_len < sizeof(*rsp))
4001 		return -EPROTO;
4002 
4003 	scid   = __le16_to_cpu(rsp->scid);
4004 	dcid   = __le16_to_cpu(rsp->dcid);
4005 	result = __le16_to_cpu(rsp->result);
4006 	status = __le16_to_cpu(rsp->status);
4007 
4008 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4009 	       dcid, scid, result, status);
4010 
4011 	mutex_lock(&conn->chan_lock);
4012 
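	/* The peer may omit our source CID (scid == 0), e.g. when rejecting
	 * the request; in that case match the response by command identifier.
	 */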
4013 	if (scid) {
4014 		chan = __l2cap_get_chan_by_scid(conn, scid);
4015 		if (!chan) {
4016 			err = -EBADSLT;
4017 			goto unlock;
4018 		}
4019 	} else {
4020 		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4021 		if (!chan) {
4022 			err = -EBADSLT;
4023 			goto unlock;
4024 		}
4025 	}
4026 
4027 	err = 0;
4028 
4029 	l2cap_chan_lock(chan);
4030 
4031 	switch (result) {
4032 	case L2CAP_CR_SUCCESS:
4033 		l2cap_state_change(chan, BT_CONFIG);
4034 		chan->ident = 0;
4035 		chan->dcid = dcid;
4036 		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
4037 
4038 		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4039 			break;
4040 
4041 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4042 			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
4043 		chan->num_conf_req++;
4044 		break;
4045 
4046 	case L2CAP_CR_PEND:
4047 		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4048 		break;
4049 
4050 	default:
4051 		l2cap_chan_del(chan, ECONNREFUSED);
4052 		break;
4053 	}
4054 
4055 	l2cap_chan_unlock(chan);
4056 
4057 unlock:
4058 	mutex_unlock(&conn->chan_lock);
4059 
4060 	return err;
4061 }
4062 
4063 static inline void set_default_fcs(struct l2cap_chan *chan)
4064 {
4065 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4066 	 * sides request it.
4067 	 */
4068 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4069 		chan->fcs = L2CAP_FCS_NONE;
4070 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4071 		chan->fcs = L2CAP_FCS_CRC16;
4072 }
4073 
4074 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4075 				    u8 ident, u16 flags)
4076 {
4077 	struct l2cap_conn *conn = chan->conn;
4078 
4079 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4080 	       flags);
4081 
4082 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4083 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4084 
4085 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4086 		       l2cap_build_conf_rsp(chan, data,
4087 					    L2CAP_CONF_SUCCESS, flags), data);
4088 }
4089 
4090 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4091 				   u16 scid, u16 dcid)
4092 {
4093 	struct l2cap_cmd_rej_cid rej;
4094 
4095 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4096 	rej.scid = __cpu_to_le16(scid);
4097 	rej.dcid = __cpu_to_le16(dcid);
4098 
4099 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4100 }
4101 
4102 static inline int l2cap_config_req(struct l2cap_conn *conn,
4103 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4104 				   u8 *data)
4105 {
4106 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4107 	u16 dcid, flags;
4108 	u8 rsp[64];
4109 	struct l2cap_chan *chan;
4110 	int len, err = 0;
4111 
4112 	if (cmd_len < sizeof(*req))
4113 		return -EPROTO;
4114 
4115 	dcid  = __le16_to_cpu(req->dcid);
4116 	flags = __le16_to_cpu(req->flags);
4117 
4118 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4119 
4120 	chan = l2cap_get_chan_by_scid(conn, dcid);
4121 	if (!chan) {
4122 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4123 		return 0;
4124 	}
4125 
4126 	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4127 		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4128 				       chan->dcid);
4129 		goto unlock;
4130 	}
4131 
4132 	/* Reject if config buffer is too small. */
4133 	len = cmd_len - sizeof(*req);
4134 	if (chan->conf_len + len > sizeof(chan->conf_req)) {
4135 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4136 			       l2cap_build_conf_rsp(chan, rsp,
4137 			       L2CAP_CONF_REJECT, flags), rsp);
4138 		goto unlock;
4139 	}
4140 
4141 	/* Store config. */
4142 	memcpy(chan->conf_req + chan->conf_len, req->data, len);
4143 	chan->conf_len += len;
4144 
4145 	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4146 		/* Incomplete config. Send empty response. */
4147 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4148 			       l2cap_build_conf_rsp(chan, rsp,
4149 			       L2CAP_CONF_SUCCESS, flags), rsp);
4150 		goto unlock;
4151 	}
4152 
4153 	/* Complete config. */
4154 	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
4155 	if (len < 0) {
4156 		l2cap_send_disconn_req(chan, ECONNRESET);
4157 		goto unlock;
4158 	}
4159 
4160 	chan->ident = cmd->ident;
4161 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4162 	chan->num_conf_rsp++;
4163 
4164 	/* Reset config buffer. */
4165 	chan->conf_len = 0;
4166 
4167 	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4168 		goto unlock;
4169 
4170 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4171 		set_default_fcs(chan);
4172 
4173 		if (chan->mode == L2CAP_MODE_ERTM ||
4174 		    chan->mode == L2CAP_MODE_STREAMING)
4175 			err = l2cap_ertm_init(chan);
4176 
4177 		if (err < 0)
4178 			l2cap_send_disconn_req(chan, -err);
4179 		else
4180 			l2cap_chan_ready(chan);
4181 
4182 		goto unlock;
4183 	}
4184 
4185 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4186 		u8 buf[64];
4187 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4188 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4189 		chan->num_conf_req++;
4190 	}
4191 
4192 	/* Got Conf Rsp PENDING from the remote side and assume we also sent
4193 	 * Conf Rsp PENDING in the code above */
4194 	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4195 	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4196 
4197 		/* check compatibility */
4198 
4199 		/* Send rsp for BR/EDR channel */
4200 		if (!chan->hs_hcon)
4201 			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4202 		else
4203 			chan->ident = cmd->ident;
4204 	}
4205 
4206 unlock:
4207 	l2cap_chan_unlock(chan);
4208 	return err;
4209 }
4210 
4211 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4212 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4213 				   u8 *data)
4214 {
4215 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4216 	u16 scid, flags, result;
4217 	struct l2cap_chan *chan;
4218 	int len = cmd_len - sizeof(*rsp);
4219 	int err = 0;
4220 
4221 	if (cmd_len < sizeof(*rsp))
4222 		return -EPROTO;
4223 
4224 	scid   = __le16_to_cpu(rsp->scid);
4225 	flags  = __le16_to_cpu(rsp->flags);
4226 	result = __le16_to_cpu(rsp->result);
4227 
4228 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4229 	       result, len);
4230 
4231 	chan = l2cap_get_chan_by_scid(conn, scid);
4232 	if (!chan)
4233 		return 0;
4234 
4235 	switch (result) {
4236 	case L2CAP_CONF_SUCCESS:
4237 		l2cap_conf_rfc_get(chan, rsp->data, len);
4238 		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4239 		break;
4240 
4241 	case L2CAP_CONF_PENDING:
4242 		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4243 
4244 		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4245 			char buf[64];
4246 
4247 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4248 						   buf, sizeof(buf), &result);
4249 			if (len < 0) {
4250 				l2cap_send_disconn_req(chan, ECONNRESET);
4251 				goto done;
4252 			}
4253 
4254 			if (!chan->hs_hcon) {
4255 				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4256 							0);
4257 			} else {
4258 				if (l2cap_check_efs(chan)) {
4259 					amp_create_logical_link(chan);
4260 					chan->ident = cmd->ident;
4261 				}
4262 			}
4263 		}
4264 		goto done;
4265 
4266 	case L2CAP_CONF_UNACCEPT:
4267 		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4268 			char req[64];
4269 
4270 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4271 				l2cap_send_disconn_req(chan, ECONNRESET);
4272 				goto done;
4273 			}
4274 
4275 			/* throw out any old stored conf requests */
4276 			result = L2CAP_CONF_SUCCESS;
4277 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4278 						   req, sizeof(req), &result);
4279 			if (len < 0) {
4280 				l2cap_send_disconn_req(chan, ECONNRESET);
4281 				goto done;
4282 			}
4283 
4284 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
4285 				       L2CAP_CONF_REQ, len, req);
4286 			chan->num_conf_req++;
4287 			if (result != L2CAP_CONF_SUCCESS)
4288 				goto done;
4289 			break;
4290 		}
4291 		/* fall through */
4292 
4293 	default:
4294 		l2cap_chan_set_err(chan, ECONNRESET);
4295 
4296 		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4297 		l2cap_send_disconn_req(chan, ECONNRESET);
4298 		goto done;
4299 	}
4300 
4301 	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4302 		goto done;
4303 
4304 	set_bit(CONF_INPUT_DONE, &chan->conf_state);
4305 
4306 	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4307 		set_default_fcs(chan);
4308 
4309 		if (chan->mode == L2CAP_MODE_ERTM ||
4310 		    chan->mode == L2CAP_MODE_STREAMING)
4311 			err = l2cap_ertm_init(chan);
4312 
4313 		if (err < 0)
4314 			l2cap_send_disconn_req(chan, -err);
4315 		else
4316 			l2cap_chan_ready(chan);
4317 	}
4318 
4319 done:
4320 	l2cap_chan_unlock(chan);
4321 	return err;
4322 }
4323 
4324 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4325 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4326 				       u8 *data)
4327 {
4328 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4329 	struct l2cap_disconn_rsp rsp;
4330 	u16 dcid, scid;
4331 	struct l2cap_chan *chan;
4332 
4333 	if (cmd_len != sizeof(*req))
4334 		return -EPROTO;
4335 
4336 	scid = __le16_to_cpu(req->scid);
4337 	dcid = __le16_to_cpu(req->dcid);
4338 
4339 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4340 
4341 	mutex_lock(&conn->chan_lock);
4342 
4343 	chan = __l2cap_get_chan_by_scid(conn, dcid);
4344 	if (!chan) {
4345 		mutex_unlock(&conn->chan_lock);
4346 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4347 		return 0;
4348 	}
4349 
4350 	l2cap_chan_lock(chan);
4351 
4352 	rsp.dcid = cpu_to_le16(chan->scid);
4353 	rsp.scid = cpu_to_le16(chan->dcid);
4354 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4355 
4356 	chan->ops->set_shutdown(chan);
4357 
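	/* Take an extra reference so the channel survives l2cap_chan_del();
	 * it is still needed for the ops->close() callback below.
	 */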
4358 	l2cap_chan_hold(chan);
4359 	l2cap_chan_del(chan, ECONNRESET);
4360 
4361 	l2cap_chan_unlock(chan);
4362 
4363 	chan->ops->close(chan);
4364 	l2cap_chan_put(chan);
4365 
4366 	mutex_unlock(&conn->chan_lock);
4367 
4368 	return 0;
4369 }
4370 
4371 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4372 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4373 				       u8 *data)
4374 {
4375 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4376 	u16 dcid, scid;
4377 	struct l2cap_chan *chan;
4378 
4379 	if (cmd_len != sizeof(*rsp))
4380 		return -EPROTO;
4381 
4382 	scid = __le16_to_cpu(rsp->scid);
4383 	dcid = __le16_to_cpu(rsp->dcid);
4384 
4385 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4386 
4387 	mutex_lock(&conn->chan_lock);
4388 
4389 	chan = __l2cap_get_chan_by_scid(conn, scid);
4390 	if (!chan) {
4391 		mutex_unlock(&conn->chan_lock);
4392 		return 0;
4393 	}
4394 
4395 	l2cap_chan_lock(chan);
4396 
4397 	l2cap_chan_hold(chan);
4398 	l2cap_chan_del(chan, 0);
4399 
4400 	l2cap_chan_unlock(chan);
4401 
4402 	chan->ops->close(chan);
4403 	l2cap_chan_put(chan);
4404 
4405 	mutex_unlock(&conn->chan_lock);
4406 
4407 	return 0;
4408 }
4409 
4410 static inline int l2cap_information_req(struct l2cap_conn *conn,
4411 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4412 					u8 *data)
4413 {
4414 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4415 	u16 type;
4416 
4417 	if (cmd_len != sizeof(*req))
4418 		return -EPROTO;
4419 
4420 	type = __le16_to_cpu(req->type);
4421 
4422 	BT_DBG("type 0x%4.4x", type);
4423 
4424 	if (type == L2CAP_IT_FEAT_MASK) {
4425 		u8 buf[8];
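		/* 2-byte type + 2-byte result + 4-byte feature mask */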
4426 		u32 feat_mask = l2cap_feat_mask;
4427 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4428 		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4429 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4430 		if (!disable_ertm)
4431 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4432 				| L2CAP_FEAT_FCS;
4433 		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4434 			feat_mask |= L2CAP_FEAT_EXT_FLOW
4435 				| L2CAP_FEAT_EXT_WINDOW;
4436 
4437 		put_unaligned_le32(feat_mask, rsp->data);
4438 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4439 			       buf);
4440 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4441 		u8 buf[12];
4442 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4443 
4444 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4445 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4446 		rsp->data[0] = conn->local_fixed_chan;
4447 		memset(rsp->data + 1, 0, 7);
4448 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4449 			       buf);
4450 	} else {
4451 		struct l2cap_info_rsp rsp;
4452 		rsp.type   = cpu_to_le16(type);
4453 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4454 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4455 			       &rsp);
4456 	}
4457 
4458 	return 0;
4459 }
4460 
4461 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4462 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4463 					u8 *data)
4464 {
4465 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4466 	u16 type, result;
4467 
4468 	if (cmd_len < sizeof(*rsp))
4469 		return -EPROTO;
4470 
4471 	type   = __le16_to_cpu(rsp->type);
4472 	result = __le16_to_cpu(rsp->result);
4473 
4474 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4475 
4476 	/* L2CAP Info req/rsp are not bound to a channel, so add extra checks */
4477 	if (cmd->ident != conn->info_ident ||
4478 	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4479 		return 0;
4480 
4481 	cancel_delayed_work(&conn->info_timer);
4482 
4483 	if (result != L2CAP_IR_SUCCESS) {
4484 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4485 		conn->info_ident = 0;
4486 
4487 		l2cap_conn_start(conn);
4488 
4489 		return 0;
4490 	}
4491 
4492 	switch (type) {
4493 	case L2CAP_IT_FEAT_MASK:
4494 		conn->feat_mask = get_unaligned_le32(rsp->data);
4495 
4496 		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4497 			struct l2cap_info_req req;
4498 			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4499 
4500 			conn->info_ident = l2cap_get_ident(conn);
4501 
4502 			l2cap_send_cmd(conn, conn->info_ident,
4503 				       L2CAP_INFO_REQ, sizeof(req), &req);
4504 		} else {
4505 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4506 			conn->info_ident = 0;
4507 
4508 			l2cap_conn_start(conn);
4509 		}
4510 		break;
4511 
4512 	case L2CAP_IT_FIXED_CHAN:
4513 		conn->remote_fixed_chan = rsp->data[0];
4514 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4515 		conn->info_ident = 0;
4516 
4517 		l2cap_conn_start(conn);
4518 		break;
4519 	}
4520 
4521 	return 0;
4522 }
4523 
4524 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4525 				    struct l2cap_cmd_hdr *cmd,
4526 				    u16 cmd_len, void *data)
4527 {
4528 	struct l2cap_create_chan_req *req = data;
4529 	struct l2cap_create_chan_rsp rsp;
4530 	struct l2cap_chan *chan;
4531 	struct hci_dev *hdev;
4532 	u16 psm, scid;
4533 
4534 	if (cmd_len != sizeof(*req))
4535 		return -EPROTO;
4536 
4537 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4538 		return -EINVAL;
4539 
4540 	psm = le16_to_cpu(req->psm);
4541 	scid = le16_to_cpu(req->scid);
4542 
4543 	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4544 
4545 	/* For controller id 0 make BR/EDR connection */
4546 	if (req->amp_id == AMP_ID_BREDR) {
4547 		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4548 			      req->amp_id);
4549 		return 0;
4550 	}
4551 
4552 	/* Validate AMP controller id */
4553 	hdev = hci_dev_get(req->amp_id);
4554 	if (!hdev)
4555 		goto error;
4556 
4557 	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4558 		hci_dev_put(hdev);
4559 		goto error;
4560 	}
4561 
4562 	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4563 			     req->amp_id);
4564 	if (chan) {
4565 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
4566 		struct hci_conn *hs_hcon;
4567 
4568 		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4569 						  &conn->hcon->dst);
4570 		if (!hs_hcon) {
4571 			hci_dev_put(hdev);
4572 			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4573 					       chan->dcid);
4574 			return 0;
4575 		}
4576 
4577 		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4578 
4579 		mgr->bredr_chan = chan;
4580 		chan->hs_hcon = hs_hcon;
4581 		chan->fcs = L2CAP_FCS_NONE;
4582 		conn->mtu = hdev->block_mtu;
4583 	}
4584 
4585 	hci_dev_put(hdev);
4586 
4587 	return 0;
4588 
4589 error:
4590 	rsp.dcid = 0;
4591 	rsp.scid = cpu_to_le16(scid);
4592 	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4593 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4594 
4595 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4596 		       sizeof(rsp), &rsp);
4597 
4598 	return 0;
4599 }
4600 
4601 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4602 {
4603 	struct l2cap_move_chan_req req;
4604 	u8 ident;
4605 
4606 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4607 
4608 	ident = l2cap_get_ident(chan->conn);
4609 	chan->ident = ident;
4610 
4611 	req.icid = cpu_to_le16(chan->scid);
4612 	req.dest_amp_id = dest_amp_id;
4613 
4614 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4615 		       &req);
4616 
4617 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4618 }
4619 
4620 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4621 {
4622 	struct l2cap_move_chan_rsp rsp;
4623 
4624 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4625 
4626 	rsp.icid = cpu_to_le16(chan->dcid);
4627 	rsp.result = cpu_to_le16(result);
4628 
4629 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4630 		       sizeof(rsp), &rsp);
4631 }
4632 
4633 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4634 {
4635 	struct l2cap_move_chan_cfm cfm;
4636 
4637 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4638 
4639 	chan->ident = l2cap_get_ident(chan->conn);
4640 
4641 	cfm.icid = cpu_to_le16(chan->scid);
4642 	cfm.result = cpu_to_le16(result);
4643 
4644 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4645 		       sizeof(cfm), &cfm);
4646 
4647 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4648 }
4649 
4650 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4651 {
4652 	struct l2cap_move_chan_cfm cfm;
4653 
4654 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4655 
4656 	cfm.icid = cpu_to_le16(icid);
4657 	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4658 
4659 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4660 		       sizeof(cfm), &cfm);
4661 }
4662 
4663 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4664 					 u16 icid)
4665 {
4666 	struct l2cap_move_chan_cfm_rsp rsp;
4667 
4668 	BT_DBG("icid 0x%4.4x", icid);
4669 
4670 	rsp.icid = cpu_to_le16(icid);
4671 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4672 }
4673 
4674 static void __release_logical_link(struct l2cap_chan *chan)
4675 {
4676 	chan->hs_hchan = NULL;
4677 	chan->hs_hcon = NULL;
4678 
4679 	/* Placeholder - release the logical link */
4680 }
4681 
4682 static void l2cap_logical_fail(struct l2cap_chan *chan)
4683 {
4684 	/* Logical link setup failed */
4685 	if (chan->state != BT_CONNECTED) {
4686 		/* Create channel failure, disconnect */
4687 		l2cap_send_disconn_req(chan, ECONNRESET);
4688 		return;
4689 	}
4690 
4691 	switch (chan->move_role) {
4692 	case L2CAP_MOVE_ROLE_RESPONDER:
4693 		l2cap_move_done(chan);
4694 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4695 		break;
4696 	case L2CAP_MOVE_ROLE_INITIATOR:
4697 		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4698 		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4699 			/* Remote has only sent pending or
4700 			 * success responses, clean up
4701 			 */
4702 			l2cap_move_done(chan);
4703 		}
4704 
4705 		/* Other amp move states imply that the move
4706 		 * has already aborted
4707 		 */
4708 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4709 		break;
4710 	}
4711 }
4712 
4713 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4714 					struct hci_chan *hchan)
4715 {
4716 	struct l2cap_conf_rsp rsp;
4717 
4718 	chan->hs_hchan = hchan;
4719 	chan->hs_hcon->l2cap_data = chan->conn;
4720 
4721 	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4722 
4723 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4724 		int err;
4725 
4726 		set_default_fcs(chan);
4727 
4728 		err = l2cap_ertm_init(chan);
4729 		if (err < 0)
4730 			l2cap_send_disconn_req(chan, -err);
4731 		else
4732 			l2cap_chan_ready(chan);
4733 	}
4734 }
4735 
4736 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4737 				      struct hci_chan *hchan)
4738 {
4739 	chan->hs_hcon = hchan->conn;
4740 	chan->hs_hcon->l2cap_data = chan->conn;
4741 
4742 	BT_DBG("move_state %d", chan->move_state);
4743 
4744 	switch (chan->move_state) {
4745 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4746 		/* Move confirm will be sent after a success
4747 		 * response is received
4748 		 */
4749 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4750 		break;
4751 	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4752 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4753 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4754 		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4755 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4756 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4757 		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4758 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4759 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4760 		}
4761 		break;
4762 	default:
4763 		/* Move was not in expected state, free the channel */
4764 		__release_logical_link(chan);
4765 
4766 		chan->move_state = L2CAP_MOVE_STABLE;
4767 	}
4768 }
4769 
4770 /* Call with chan locked */
4771 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4772 		       u8 status)
4773 {
4774 	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4775 
4776 	if (status) {
4777 		l2cap_logical_fail(chan);
4778 		__release_logical_link(chan);
4779 		return;
4780 	}
4781 
4782 	if (chan->state != BT_CONNECTED) {
4783 		/* Ignore logical link if channel is on BR/EDR */
4784 		if (chan->local_amp_id != AMP_ID_BREDR)
4785 			l2cap_logical_finish_create(chan, hchan);
4786 	} else {
4787 		l2cap_logical_finish_move(chan, hchan);
4788 	}
4789 }
4790 
4791 void l2cap_move_start(struct l2cap_chan *chan)
4792 {
4793 	BT_DBG("chan %p", chan);
4794 
4795 	if (chan->local_amp_id == AMP_ID_BREDR) {
4796 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4797 			return;
4798 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4799 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4800 		/* Placeholder - start physical link setup */
4801 	} else {
4802 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4803 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4804 		chan->move_id = 0;
4805 		l2cap_move_setup(chan);
4806 		l2cap_send_move_chan_req(chan, 0);
4807 	}
4808 }
4809 
4810 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4811 			    u8 local_amp_id, u8 remote_amp_id)
4812 {
4813 	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4814 	       local_amp_id, remote_amp_id);
4815 
4816 	chan->fcs = L2CAP_FCS_NONE;
4817 
4818 	/* Outgoing channel on AMP */
4819 	if (chan->state == BT_CONNECT) {
4820 		if (result == L2CAP_CR_SUCCESS) {
4821 			chan->local_amp_id = local_amp_id;
4822 			l2cap_send_create_chan_req(chan, remote_amp_id);
4823 		} else {
4824 			/* Revert to BR/EDR connect */
4825 			l2cap_send_conn_req(chan);
4826 		}
4827 
4828 		return;
4829 	}
4830 
4831 	/* Incoming channel on AMP */
4832 	if (__l2cap_no_conn_pending(chan)) {
4833 		struct l2cap_conn_rsp rsp;
4834 		char buf[128];
4835 		rsp.scid = cpu_to_le16(chan->dcid);
4836 		rsp.dcid = cpu_to_le16(chan->scid);
4837 
4838 		if (result == L2CAP_CR_SUCCESS) {
4839 			/* Send successful response */
4840 			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4841 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4842 		} else {
4843 			/* Send negative response */
4844 			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4845 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4846 		}
4847 
4848 		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4849 			       sizeof(rsp), &rsp);
4850 
4851 		if (result == L2CAP_CR_SUCCESS) {
4852 			l2cap_state_change(chan, BT_CONFIG);
4853 			set_bit(CONF_REQ_SENT, &chan->conf_state);
4854 			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4855 				       L2CAP_CONF_REQ,
4856 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4857 			chan->num_conf_req++;
4858 		}
4859 	}
4860 }
4861 
4862 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4863 				   u8 remote_amp_id)
4864 {
4865 	l2cap_move_setup(chan);
4866 	chan->move_id = local_amp_id;
4867 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
4868 
4869 	l2cap_send_move_chan_req(chan, remote_amp_id);
4870 }
4871 
4872 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4873 {
4874 	struct hci_chan *hchan = NULL;
4875 
4876 	/* Placeholder - get hci_chan for logical link */
4877 
4878 	if (hchan) {
4879 		if (hchan->state == BT_CONNECTED) {
4880 			/* Logical link is ready to go */
4881 			chan->hs_hcon = hchan->conn;
4882 			chan->hs_hcon->l2cap_data = chan->conn;
4883 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4884 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4885 
4886 			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4887 		} else {
4888 			/* Wait for logical link to be ready */
4889 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4890 		}
4891 	} else {
4892 		/* Logical link not available */
4893 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4894 	}
4895 }
4896 
4897 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4898 {
4899 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4900 		u8 rsp_result;
4901 		if (result == -EINVAL)
4902 			rsp_result = L2CAP_MR_BAD_ID;
4903 		else
4904 			rsp_result = L2CAP_MR_NOT_ALLOWED;
4905 
4906 		l2cap_send_move_chan_rsp(chan, rsp_result);
4907 	}
4908 
4909 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
4910 	chan->move_state = L2CAP_MOVE_STABLE;
4911 
4912 	/* Restart data transmission */
4913 	l2cap_ertm_send(chan);
4914 }
4915 
4916 /* Invoke with locked chan */
4917 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4918 {
4919 	u8 local_amp_id = chan->local_amp_id;
4920 	u8 remote_amp_id = chan->remote_amp_id;
4921 
4922 	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4923 	       chan, result, local_amp_id, remote_amp_id);
4924 
4925 	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4926 		l2cap_chan_unlock(chan);
4927 		return;
4928 	}
4929 
4930 	if (chan->state != BT_CONNECTED) {
4931 		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4932 	} else if (result != L2CAP_MR_SUCCESS) {
4933 		l2cap_do_move_cancel(chan, result);
4934 	} else {
4935 		switch (chan->move_role) {
4936 		case L2CAP_MOVE_ROLE_INITIATOR:
4937 			l2cap_do_move_initiate(chan, local_amp_id,
4938 					       remote_amp_id);
4939 			break;
4940 		case L2CAP_MOVE_ROLE_RESPONDER:
4941 			l2cap_do_move_respond(chan, result);
4942 			break;
4943 		default:
4944 			l2cap_do_move_cancel(chan, result);
4945 			break;
4946 		}
4947 	}
4948 }
4949 
4950 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4951 					 struct l2cap_cmd_hdr *cmd,
4952 					 u16 cmd_len, void *data)
4953 {
4954 	struct l2cap_move_chan_req *req = data;
4955 	struct l2cap_move_chan_rsp rsp;
4956 	struct l2cap_chan *chan;
4957 	u16 icid = 0;
4958 	u16 result = L2CAP_MR_NOT_ALLOWED;
4959 
4960 	if (cmd_len != sizeof(*req))
4961 		return -EPROTO;
4962 
4963 	icid = le16_to_cpu(req->icid);
4964 
4965 	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4966 
4967 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4968 		return -EINVAL;
4969 
4970 	chan = l2cap_get_chan_by_dcid(conn, icid);
4971 	if (!chan) {
4972 		rsp.icid = cpu_to_le16(icid);
4973 		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4974 		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4975 			       sizeof(rsp), &rsp);
4976 		return 0;
4977 	}
4978 
4979 	chan->ident = cmd->ident;
4980 
4981 	if (chan->scid < L2CAP_CID_DYN_START ||
4982 	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4983 	    (chan->mode != L2CAP_MODE_ERTM &&
4984 	     chan->mode != L2CAP_MODE_STREAMING)) {
4985 		result = L2CAP_MR_NOT_ALLOWED;
4986 		goto send_move_response;
4987 	}
4988 
4989 	if (chan->local_amp_id == req->dest_amp_id) {
4990 		result = L2CAP_MR_SAME_ID;
4991 		goto send_move_response;
4992 	}
4993 
4994 	if (req->dest_amp_id != AMP_ID_BREDR) {
4995 		struct hci_dev *hdev;
4996 		hdev = hci_dev_get(req->dest_amp_id);
4997 		if (!hdev || hdev->dev_type != HCI_AMP ||
4998 		    !test_bit(HCI_UP, &hdev->flags)) {
4999 			if (hdev)
5000 				hci_dev_put(hdev);
5001 
5002 			result = L2CAP_MR_BAD_ID;
5003 			goto send_move_response;
5004 		}
5005 		hci_dev_put(hdev);
5006 	}
5007 
5008 	/* Detect a move collision.  Only send a collision response
5009 	 * if this side has "lost", otherwise proceed with the move.
5010 	 * The winner has the larger bd_addr.
5011 	 */
5012 	if ((__chan_is_moving(chan) ||
5013 	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5014 	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5015 		result = L2CAP_MR_COLLISION;
5016 		goto send_move_response;
5017 	}
5018 
5019 	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5020 	l2cap_move_setup(chan);
5021 	chan->move_id = req->dest_amp_id;
5022 	icid = chan->dcid;
5023 
5024 	if (req->dest_amp_id == AMP_ID_BREDR) {
5025 		/* Moving to BR/EDR */
5026 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5027 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5028 			result = L2CAP_MR_PEND;
5029 		} else {
5030 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5031 			result = L2CAP_MR_SUCCESS;
5032 		}
5033 	} else {
5034 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5035 		/* Placeholder - uncomment when amp functions are available */
5036 		/*amp_accept_physical(chan, req->dest_amp_id);*/
5037 		result = L2CAP_MR_PEND;
5038 	}
5039 
5040 send_move_response:
5041 	l2cap_send_move_chan_rsp(chan, result);
5042 
5043 	l2cap_chan_unlock(chan);
5044 
5045 	return 0;
5046 }
5047 
5048 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5049 {
5050 	struct l2cap_chan *chan;
5051 	struct hci_chan *hchan = NULL;
5052 
5053 	chan = l2cap_get_chan_by_scid(conn, icid);
5054 	if (!chan) {
5055 		l2cap_send_move_chan_cfm_icid(conn, icid);
5056 		return;
5057 	}
5058 
5059 	__clear_chan_timer(chan);
5060 	if (result == L2CAP_MR_PEND)
5061 		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5062 
5063 	switch (chan->move_state) {
5064 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5065 		/* Move confirm will be sent when logical link
5066 		 * is complete.
5067 		 */
5068 		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5069 		break;
5070 	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5071 		if (result == L2CAP_MR_PEND) {
5072 			break;
5073 		} else if (test_bit(CONN_LOCAL_BUSY,
5074 				    &chan->conn_state)) {
5075 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5076 		} else {
5077 			/* Logical link is up or moving to BR/EDR,
5078 			 * proceed with move
5079 			 */
5080 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5081 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5082 		}
5083 		break;
5084 	case L2CAP_MOVE_WAIT_RSP:
5085 		/* Moving to AMP */
5086 		if (result == L2CAP_MR_SUCCESS) {
5087 			/* Remote is ready, send confirm immediately
5088 			 * after logical link is ready
5089 			 */
5090 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5091 		} else {
5092 			/* Both logical link and move success
5093 			 * are required to confirm
5094 			 */
5095 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5096 		}
5097 
5098 		/* Placeholder - get hci_chan for logical link */
5099 		if (!hchan) {
5100 			/* Logical link not available */
5101 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5102 			break;
5103 		}
5104 
5105 		/* If the logical link is not yet connected, do not
5106 		 * send confirmation.
5107 		 */
5108 		if (hchan->state != BT_CONNECTED)
5109 			break;
5110 
5111 		/* Logical link is already ready to go */
5112 
5113 		chan->hs_hcon = hchan->conn;
5114 		chan->hs_hcon->l2cap_data = chan->conn;
5115 
5116 		if (result == L2CAP_MR_SUCCESS) {
5117 			/* Can confirm now */
5118 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5119 		} else {
5120 			/* Now only need move success
5121 			 * to confirm
5122 			 */
5123 			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5124 		}
5125 
5126 		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5127 		break;
5128 	default:
5129 		/* Any other amp move state means the move failed. */
5130 		chan->move_id = chan->local_amp_id;
5131 		l2cap_move_done(chan);
5132 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5133 	}
5134 
5135 	l2cap_chan_unlock(chan);
5136 }
5137 
5138 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5139 			    u16 result)
5140 {
5141 	struct l2cap_chan *chan;
5142 
5143 	chan = l2cap_get_chan_by_ident(conn, ident);
5144 	if (!chan) {
5145 		/* Could not locate channel, icid is best guess */
5146 		l2cap_send_move_chan_cfm_icid(conn, icid);
5147 		return;
5148 	}
5149 
5150 	__clear_chan_timer(chan);
5151 
5152 	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5153 		if (result == L2CAP_MR_COLLISION) {
5154 			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5155 		} else {
5156 			/* Cleanup - cancel move */
5157 			chan->move_id = chan->local_amp_id;
5158 			l2cap_move_done(chan);
5159 		}
5160 	}
5161 
5162 	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5163 
5164 	l2cap_chan_unlock(chan);
5165 }
5166 
5167 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5168 				  struct l2cap_cmd_hdr *cmd,
5169 				  u16 cmd_len, void *data)
5170 {
5171 	struct l2cap_move_chan_rsp *rsp = data;
5172 	u16 icid, result;
5173 
5174 	if (cmd_len != sizeof(*rsp))
5175 		return -EPROTO;
5176 
5177 	icid = le16_to_cpu(rsp->icid);
5178 	result = le16_to_cpu(rsp->result);
5179 
5180 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5181 
5182 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5183 		l2cap_move_continue(conn, icid, result);
5184 	else
5185 		l2cap_move_fail(conn, cmd->ident, icid, result);
5186 
5187 	return 0;
5188 }
5189 
5190 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5191 				      struct l2cap_cmd_hdr *cmd,
5192 				      u16 cmd_len, void *data)
5193 {
5194 	struct l2cap_move_chan_cfm *cfm = data;
5195 	struct l2cap_chan *chan;
5196 	u16 icid, result;
5197 
5198 	if (cmd_len != sizeof(*cfm))
5199 		return -EPROTO;
5200 
5201 	icid = le16_to_cpu(cfm->icid);
5202 	result = le16_to_cpu(cfm->result);
5203 
5204 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5205 
5206 	chan = l2cap_get_chan_by_dcid(conn, icid);
5207 	if (!chan) {
5208 		/* Spec requires a response even if the icid was not found */
5209 		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5210 		return 0;
5211 	}
5212 
5213 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5214 		if (result == L2CAP_MC_CONFIRMED) {
5215 			chan->local_amp_id = chan->move_id;
5216 			if (chan->local_amp_id == AMP_ID_BREDR)
5217 				__release_logical_link(chan);
5218 		} else {
5219 			chan->move_id = chan->local_amp_id;
5220 		}
5221 
5222 		l2cap_move_done(chan);
5223 	}
5224 
5225 	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5226 
5227 	l2cap_chan_unlock(chan);
5228 
5229 	return 0;
5230 }
5231 
5232 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5233 						 struct l2cap_cmd_hdr *cmd,
5234 						 u16 cmd_len, void *data)
5235 {
5236 	struct l2cap_move_chan_cfm_rsp *rsp = data;
5237 	struct l2cap_chan *chan;
5238 	u16 icid;
5239 
5240 	if (cmd_len != sizeof(*rsp))
5241 		return -EPROTO;
5242 
5243 	icid = le16_to_cpu(rsp->icid);
5244 
5245 	BT_DBG("icid 0x%4.4x", icid);
5246 
5247 	chan = l2cap_get_chan_by_scid(conn, icid);
5248 	if (!chan)
5249 		return 0;
5250 
5251 	__clear_chan_timer(chan);
5252 
5253 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5254 		chan->local_amp_id = chan->move_id;
5255 
5256 		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5257 			__release_logical_link(chan);
5258 
5259 		l2cap_move_done(chan);
5260 	}
5261 
5262 	l2cap_chan_unlock(chan);
5263 
5264 	return 0;
5265 }
5266 
5267 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5268 					      struct l2cap_cmd_hdr *cmd,
5269 					      u16 cmd_len, u8 *data)
5270 {
5271 	struct hci_conn *hcon = conn->hcon;
5272 	struct l2cap_conn_param_update_req *req;
5273 	struct l2cap_conn_param_update_rsp rsp;
5274 	u16 min, max, latency, to_multiplier;
5275 	int err;
5276 
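	/* Only the LE central (master) acts on a Connection Parameter Update
	 * Request; otherwise the command is rejected as not understood.
	 */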
5277 	if (hcon->role != HCI_ROLE_MASTER)
5278 		return -EINVAL;
5279 
5280 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5281 		return -EPROTO;
5282 
5283 	req = (struct l2cap_conn_param_update_req *) data;
5284 	min		= __le16_to_cpu(req->min);
5285 	max		= __le16_to_cpu(req->max);
5286 	latency		= __le16_to_cpu(req->latency);
5287 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
5288 
5289 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5290 	       min, max, latency, to_multiplier);
5291 
5292 	memset(&rsp, 0, sizeof(rsp));
5293 
5294 	err = hci_check_conn_params(min, max, latency, to_multiplier);
5295 	if (err)
5296 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5297 	else
5298 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5299 
5300 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5301 		       sizeof(rsp), &rsp);
5302 
5303 	if (!err) {
5304 		u8 store_hint;
5305 
5306 		store_hint = hci_le_conn_update(hcon, min, max, latency,
5307 						to_multiplier);
5308 		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5309 				    store_hint, min, max, latency,
5310 				    to_multiplier);
5311 
5312 	}
5313 
5314 	return 0;
5315 }
5316 
5317 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5318 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5319 				u8 *data)
5320 {
5321 	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5322 	struct hci_conn *hcon = conn->hcon;
5323 	u16 dcid, mtu, mps, credits, result;
5324 	struct l2cap_chan *chan;
5325 	int err, sec_level;
5326 
5327 	if (cmd_len < sizeof(*rsp))
5328 		return -EPROTO;
5329 
5330 	dcid    = __le16_to_cpu(rsp->dcid);
5331 	mtu     = __le16_to_cpu(rsp->mtu);
5332 	mps     = __le16_to_cpu(rsp->mps);
5333 	credits = __le16_to_cpu(rsp->credits);
5334 	result  = __le16_to_cpu(rsp->result);
5335 
5336 	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
5337 					   dcid < L2CAP_CID_DYN_START ||
5338 					   dcid > L2CAP_CID_LE_DYN_END))
5339 		return -EPROTO;
5340 
5341 	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5342 	       dcid, mtu, mps, credits, result);
5343 
5344 	mutex_lock(&conn->chan_lock);
5345 
5346 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5347 	if (!chan) {
5348 		err = -EBADSLT;
5349 		goto unlock;
5350 	}
5351 
5352 	err = 0;
5353 
5354 	l2cap_chan_lock(chan);
5355 
5356 	switch (result) {
5357 	case L2CAP_CR_LE_SUCCESS:
5358 		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5359 			err = -EBADSLT;
5360 			break;
5361 		}
5362 
5363 		chan->ident = 0;
5364 		chan->dcid = dcid;
5365 		chan->omtu = mtu;
5366 		chan->remote_mps = mps;
5367 		chan->tx_credits = credits;
5368 		l2cap_chan_ready(chan);
5369 		break;
5370 
5371 	case L2CAP_CR_LE_AUTHENTICATION:
5372 	case L2CAP_CR_LE_ENCRYPTION:
5373 		/* If we already have MITM protection we can't do
5374 		 * anything.
5375 		 */
5376 		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5377 			l2cap_chan_del(chan, ECONNREFUSED);
5378 			break;
5379 		}
5380 
5381 		sec_level = hcon->sec_level + 1;
5382 		if (chan->sec_level < sec_level)
5383 			chan->sec_level = sec_level;
5384 
5385 		/* We'll need to send a new Connect Request */
5386 		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5387 
5388 		smp_conn_security(hcon, chan->sec_level);
5389 		break;
5390 
5391 	default:
5392 		l2cap_chan_del(chan, ECONNREFUSED);
5393 		break;
5394 	}
5395 
5396 	l2cap_chan_unlock(chan);
5397 
5398 unlock:
5399 	mutex_unlock(&conn->chan_lock);
5400 
5401 	return err;
5402 }
5403 
5404 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5405 				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5406 				      u8 *data)
5407 {
5408 	int err = 0;
5409 
5410 	switch (cmd->code) {
5411 	case L2CAP_COMMAND_REJ:
5412 		l2cap_command_rej(conn, cmd, cmd_len, data);
5413 		break;
5414 
5415 	case L2CAP_CONN_REQ:
5416 		err = l2cap_connect_req(conn, cmd, cmd_len, data);
5417 		break;
5418 
5419 	case L2CAP_CONN_RSP:
5420 	case L2CAP_CREATE_CHAN_RSP:
5421 		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5422 		break;
5423 
5424 	case L2CAP_CONF_REQ:
5425 		err = l2cap_config_req(conn, cmd, cmd_len, data);
5426 		break;
5427 
5428 	case L2CAP_CONF_RSP:
5429 		l2cap_config_rsp(conn, cmd, cmd_len, data);
5430 		break;
5431 
5432 	case L2CAP_DISCONN_REQ:
5433 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5434 		break;
5435 
5436 	case L2CAP_DISCONN_RSP:
5437 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5438 		break;
5439 
5440 	case L2CAP_ECHO_REQ:
5441 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5442 		break;
5443 
5444 	case L2CAP_ECHO_RSP:
5445 		break;
5446 
5447 	case L2CAP_INFO_REQ:
5448 		err = l2cap_information_req(conn, cmd, cmd_len, data);
5449 		break;
5450 
5451 	case L2CAP_INFO_RSP:
5452 		l2cap_information_rsp(conn, cmd, cmd_len, data);
5453 		break;
5454 
5455 	case L2CAP_CREATE_CHAN_REQ:
5456 		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5457 		break;
5458 
5459 	case L2CAP_MOVE_CHAN_REQ:
5460 		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5461 		break;
5462 
5463 	case L2CAP_MOVE_CHAN_RSP:
5464 		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5465 		break;
5466 
5467 	case L2CAP_MOVE_CHAN_CFM:
5468 		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5469 		break;
5470 
5471 	case L2CAP_MOVE_CHAN_CFM_RSP:
5472 		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5473 		break;
5474 
5475 	default:
5476 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5477 		err = -EINVAL;
5478 		break;
5479 	}
5480 
5481 	return err;
5482 }
5483 
5484 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5485 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5486 				u8 *data)
5487 {
5488 	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5489 	struct l2cap_le_conn_rsp rsp;
5490 	struct l2cap_chan *chan, *pchan;
5491 	u16 dcid, scid, credits, mtu, mps;
5492 	__le16 psm;
5493 	u8 result;
5494 
5495 	if (cmd_len != sizeof(*req))
5496 		return -EPROTO;
5497 
5498 	scid = __le16_to_cpu(req->scid);
5499 	mtu  = __le16_to_cpu(req->mtu);
5500 	mps  = __le16_to_cpu(req->mps);
5501 	psm  = req->psm;
5502 	dcid = 0;
5503 	credits = 0;
5504 
5505 	if (mtu < 23 || mps < 23)
5506 		return -EPROTO;
5507 
5508 	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5509 	       scid, mtu, mps);
5510 
5511 	/* Check if we have a socket listening on psm */
5512 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5513 					 &conn->hcon->dst, LE_LINK);
5514 	if (!pchan) {
5515 		result = L2CAP_CR_LE_BAD_PSM;
5516 		chan = NULL;
5517 		goto response;
5518 	}
5519 
5520 	mutex_lock(&conn->chan_lock);
5521 	l2cap_chan_lock(pchan);
5522 
5523 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5524 				     SMP_ALLOW_STK)) {
5525 		result = L2CAP_CR_LE_AUTHENTICATION;
5526 		chan = NULL;
5527 		goto response_unlock;
5528 	}
5529 
5530 	/* Check for valid dynamic CID range */
5531 	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5532 		result = L2CAP_CR_LE_INVALID_SCID;
5533 		chan = NULL;
5534 		goto response_unlock;
5535 	}
5536 
5537 	/* Check if we already have a channel with that dcid */
5538 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
5539 		result = L2CAP_CR_LE_SCID_IN_USE;
5540 		chan = NULL;
5541 		goto response_unlock;
5542 	}
5543 
5544 	chan = pchan->ops->new_connection(pchan);
5545 	if (!chan) {
5546 		result = L2CAP_CR_LE_NO_MEM;
5547 		goto response_unlock;
5548 	}
5549 
5550 	bacpy(&chan->src, &conn->hcon->src);
5551 	bacpy(&chan->dst, &conn->hcon->dst);
5552 	chan->src_type = bdaddr_src_type(conn->hcon);
5553 	chan->dst_type = bdaddr_dst_type(conn->hcon);
5554 	chan->psm  = psm;
5555 	chan->dcid = scid;
5556 	chan->omtu = mtu;
5557 	chan->remote_mps = mps;
5558 
5559 	__l2cap_chan_add(conn, chan);
5560 
5561 	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));
5562 
5563 	dcid = chan->scid;
5564 	credits = chan->rx_credits;
5565 
5566 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5567 
5568 	chan->ident = cmd->ident;
5569 
5570 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5571 		l2cap_state_change(chan, BT_CONNECT2);
5572 		/* The following result value is actually not defined
5573 		 * for LE CoC but we use it to let the function know
5574 		 * that it should bail out after doing its cleanup
5575 		 * instead of sending a response.
5576 		 */
5577 		result = L2CAP_CR_PEND;
5578 		chan->ops->defer(chan);
5579 	} else {
5580 		l2cap_chan_ready(chan);
5581 		result = L2CAP_CR_LE_SUCCESS;
5582 	}
5583 
5584 response_unlock:
5585 	l2cap_chan_unlock(pchan);
5586 	mutex_unlock(&conn->chan_lock);
5587 	l2cap_chan_put(pchan);
5588 
5589 	if (result == L2CAP_CR_PEND)
5590 		return 0;
5591 
5592 response:
5593 	if (chan) {
5594 		rsp.mtu = cpu_to_le16(chan->imtu);
5595 		rsp.mps = cpu_to_le16(chan->mps);
5596 	} else {
5597 		rsp.mtu = 0;
5598 		rsp.mps = 0;
5599 	}
5600 
5601 	rsp.dcid    = cpu_to_le16(dcid);
5602 	rsp.credits = cpu_to_le16(credits);
5603 	rsp.result  = cpu_to_le16(result);
5604 
5605 	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
5606 
5607 	return 0;
5608 }
5609 
5610 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5611 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5612 				   u8 *data)
5613 {
5614 	struct l2cap_le_credits *pkt;
5615 	struct l2cap_chan *chan;
5616 	u16 cid, credits, max_credits;
5617 
5618 	if (cmd_len != sizeof(*pkt))
5619 		return -EPROTO;
5620 
5621 	pkt = (struct l2cap_le_credits *) data;
5622 	cid	= __le16_to_cpu(pkt->cid);
5623 	credits	= __le16_to_cpu(pkt->credits);
5624 
5625 	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5626 
5627 	chan = l2cap_get_chan_by_dcid(conn, cid);
5628 	if (!chan)
5629 		return -EBADSLT;
5630 
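	/* A channel may hold at most LE_FLOWCTL_MAX_CREDITS credits in total;
	 * a peer granting more than the remaining headroom violates the
	 * protocol, so disconnect the channel.
	 */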
5631 	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5632 	if (credits > max_credits) {
5633 		BT_ERR("LE credits overflow");
5634 		l2cap_send_disconn_req(chan, ECONNRESET);
5635 		l2cap_chan_unlock(chan);
5636 
5637 		/* Return 0 so that we don't trigger an unnecessary
5638 		 * command reject packet.
5639 		 */
5640 		return 0;
5641 	}
5642 
5643 	chan->tx_credits += credits;
5644 
5645 	/* Resume sending */
5646 	l2cap_le_flowctl_send(chan);
5647 
5648 	if (chan->tx_credits)
5649 		chan->ops->resume(chan);
5650 
5651 	l2cap_chan_unlock(chan);
5652 
5653 	return 0;
5654 }
5655 
5656 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5657 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5658 				       u8 *data)
5659 {
5660 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5661 	struct l2cap_chan *chan;
5662 
5663 	if (cmd_len < sizeof(*rej))
5664 		return -EPROTO;
5665 
5666 	mutex_lock(&conn->chan_lock);
5667 
5668 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5669 	if (!chan)
5670 		goto done;
5671 
5672 	l2cap_chan_lock(chan);
5673 	l2cap_chan_del(chan, ECONNREFUSED);
5674 	l2cap_chan_unlock(chan);
5675 
5676 done:
5677 	mutex_unlock(&conn->chan_lock);
5678 	return 0;
5679 }
5680 
5681 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5682 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5683 				   u8 *data)
5684 {
5685 	int err = 0;
5686 
5687 	switch (cmd->code) {
5688 	case L2CAP_COMMAND_REJ:
5689 		l2cap_le_command_rej(conn, cmd, cmd_len, data);
5690 		break;
5691 
5692 	case L2CAP_CONN_PARAM_UPDATE_REQ:
5693 		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5694 		break;
5695 
5696 	case L2CAP_CONN_PARAM_UPDATE_RSP:
5697 		break;
5698 
5699 	case L2CAP_LE_CONN_RSP:
5700 		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5701 		break;
5702 
5703 	case L2CAP_LE_CONN_REQ:
5704 		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5705 		break;
5706 
5707 	case L2CAP_LE_CREDITS:
5708 		err = l2cap_le_credits(conn, cmd, cmd_len, data);
5709 		break;
5710 
5711 	case L2CAP_DISCONN_REQ:
5712 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5713 		break;
5714 
5715 	case L2CAP_DISCONN_RSP:
5716 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5717 		break;
5718 
5719 	default:
5720 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5721 		err = -EINVAL;
5722 		break;
5723 	}
5724 
5725 	return err;
5726 }
5727 
5728 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5729 					struct sk_buff *skb)
5730 {
5731 	struct hci_conn *hcon = conn->hcon;
5732 	struct l2cap_cmd_hdr *cmd;
5733 	u16 len;
5734 	int err;
5735 
5736 	if (hcon->type != LE_LINK)
5737 		goto drop;
5738 
5739 	if (skb->len < L2CAP_CMD_HDR_SIZE)
5740 		goto drop;
5741 
5742 	cmd = (void *) skb->data;
5743 	skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5744 
5745 	len = le16_to_cpu(cmd->len);
5746 
5747 	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
5748 
5749 	if (len != skb->len || !cmd->ident) {
5750 		BT_DBG("corrupted command");
5751 		goto drop;
5752 	}
5753 
5754 	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5755 	if (err) {
5756 		struct l2cap_cmd_rej_unk rej;
5757 
5758 		BT_ERR("Wrong link type (%d)", err);
5759 
5760 		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5761 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5762 			       sizeof(rej), &rej);
5763 	}
5764 
5765 drop:
5766 	kfree_skb(skb);
5767 }
5768 
5769 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5770 				     struct sk_buff *skb)
5771 {
5772 	struct hci_conn *hcon = conn->hcon;
5773 	u8 *data = skb->data;
5774 	int len = skb->len;
5775 	struct l2cap_cmd_hdr cmd;
5776 	int err;
5777 
5778 	l2cap_raw_recv(conn, skb);
5779 
5780 	if (hcon->type != ACL_LINK)
5781 		goto drop;
5782 
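	/* A single signaling C-frame may carry several commands; parse each
	 * command header and dispatch it before advancing past its payload.
	 */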
5783 	while (len >= L2CAP_CMD_HDR_SIZE) {
5784 		u16 cmd_len;
5785 		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5786 		data += L2CAP_CMD_HDR_SIZE;
5787 		len  -= L2CAP_CMD_HDR_SIZE;
5788 
5789 		cmd_len = le16_to_cpu(cmd.len);
5790 
5791 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5792 		       cmd.ident);
5793 
5794 		if (cmd_len > len || !cmd.ident) {
5795 			BT_DBG("corrupted command");
5796 			break;
5797 		}
5798 
5799 		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5800 		if (err) {
5801 			struct l2cap_cmd_rej_unk rej;
5802 
5803 			BT_ERR("Wrong link type (%d)", err);
5804 
5805 			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5806 			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5807 				       sizeof(rej), &rej);
5808 		}
5809 
5810 		data += cmd_len;
5811 		len  -= cmd_len;
5812 	}
5813 
5814 drop:
5815 	kfree_skb(skb);
5816 }
5817 
5818 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
5819 {
5820 	u16 our_fcs, rcv_fcs;
5821 	int hdr_size;
5822 
5823 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5824 		hdr_size = L2CAP_EXT_HDR_SIZE;
5825 	else
5826 		hdr_size = L2CAP_ENH_HDR_SIZE;
5827 
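	/* With CRC16 FCS in use, trim the 2-byte trailer and recompute the
	 * checksum over the L2CAP header (still in front of skb->data) plus
	 * the remaining payload.
	 */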
5828 	if (chan->fcs == L2CAP_FCS_CRC16) {
5829 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5830 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5831 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5832 
5833 		if (our_fcs != rcv_fcs)
5834 			return -EBADMSG;
5835 	}
5836 	return 0;
5837 }
5838 
5839 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5840 {
5841 	struct l2cap_ctrl control;
5842 
5843 	BT_DBG("chan %p", chan);
5844 
5845 	memset(&control, 0, sizeof(control));
5846 	control.sframe = 1;
5847 	control.final = 1;
5848 	control.reqseq = chan->buffer_seq;
5849 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
5850 
5851 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5852 		control.super = L2CAP_SUPER_RNR;
5853 		l2cap_send_sframe(chan, &control);
5854 	}
5855 
5856 	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5857 	    chan->unacked_frames > 0)
5858 		__set_retrans_timer(chan);
5859 
5860 	/* Send pending iframes */
5861 	l2cap_ertm_send(chan);
5862 
5863 	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5864 	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5865 		/* F-bit wasn't sent in an s-frame or i-frame yet, so
5866 		 * send it now.
5867 		 */
5868 		control.super = L2CAP_SUPER_RR;
5869 		l2cap_send_sframe(chan, &control);
5870 	}
5871 }
5872 
5873 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5874 			    struct sk_buff **last_frag)
5875 {
5876 	/* skb->len reflects data in skb as well as all fragments
5877 	 * skb->data_len reflects only data in fragments
5878 	 */
5879 	if (!skb_has_frag_list(skb))
5880 		skb_shinfo(skb)->frag_list = new_frag;
5881 
5882 	new_frag->next = NULL;
5883 
5884 	(*last_frag)->next = new_frag;
5885 	*last_frag = new_frag;
5886 
5887 	skb->len += new_frag->len;
5888 	skb->data_len += new_frag->len;
5889 	skb->truesize += new_frag->truesize;
5890 }
5891 
5892 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5893 				struct l2cap_ctrl *control)
5894 {
5895 	int err = -EINVAL;
5896 
5897 	switch (control->sar) {
5898 	case L2CAP_SAR_UNSEGMENTED:
5899 		if (chan->sdu)
5900 			break;
5901 
5902 		err = chan->ops->recv(chan, skb);
5903 		break;
5904 
5905 	case L2CAP_SAR_START:
5906 		if (chan->sdu)
5907 			break;
5908 
5909 		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
5910 			break;
5911 
5912 		chan->sdu_len = get_unaligned_le16(skb->data);
5913 		skb_pull(skb, L2CAP_SDULEN_SIZE);
5914 
5915 		if (chan->sdu_len > chan->imtu) {
5916 			err = -EMSGSIZE;
5917 			break;
5918 		}
5919 
5920 		if (skb->len >= chan->sdu_len)
5921 			break;
5922 
5923 		chan->sdu = skb;
5924 		chan->sdu_last_frag = skb;
5925 
5926 		skb = NULL;
5927 		err = 0;
5928 		break;
5929 
5930 	case L2CAP_SAR_CONTINUE:
5931 		if (!chan->sdu)
5932 			break;
5933 
5934 		append_skb_frag(chan->sdu, skb,
5935 				&chan->sdu_last_frag);
5936 		skb = NULL;
5937 
5938 		if (chan->sdu->len >= chan->sdu_len)
5939 			break;
5940 
5941 		err = 0;
5942 		break;
5943 
5944 	case L2CAP_SAR_END:
5945 		if (!chan->sdu)
5946 			break;
5947 
5948 		append_skb_frag(chan->sdu, skb,
5949 				&chan->sdu_last_frag);
5950 		skb = NULL;
5951 
5952 		if (chan->sdu->len != chan->sdu_len)
5953 			break;
5954 
5955 		err = chan->ops->recv(chan, chan->sdu);
5956 
5957 		if (!err) {
5958 			/* Reassembly complete */
5959 			chan->sdu = NULL;
5960 			chan->sdu_last_frag = NULL;
5961 			chan->sdu_len = 0;
5962 		}
5963 		break;
5964 	}
5965 
5966 	if (err) {
5967 		kfree_skb(skb);
5968 		kfree_skb(chan->sdu);
5969 		chan->sdu = NULL;
5970 		chan->sdu_last_frag = NULL;
5971 		chan->sdu_len = 0;
5972 	}
5973 
5974 	return err;
5975 }
5976 
5977 static int l2cap_resegment(struct l2cap_chan *chan)
5978 {
5979 	/* Placeholder */
5980 	return 0;
5981 }
5982 
5983 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5984 {
5985 	u8 event;
5986 
5987 	if (chan->mode != L2CAP_MODE_ERTM)
5988 		return;
5989 
5990 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5991 	l2cap_tx(chan, NULL, NULL, event);
5992 }
5993 
5994 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5995 {
5996 	int err = 0;
5997 	/* Pass sequential frames to l2cap_reassemble_sdu()
5998 	 * until a gap is encountered.
5999 	 */
6000 
6001 	BT_DBG("chan %p", chan);
6002 
6003 	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6004 		struct sk_buff *skb;
6005 		BT_DBG("Searching for skb with txseq %d (queue len %d)",
6006 		       chan->buffer_seq, skb_queue_len(&chan->srej_q));
6007 
6008 		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6009 
6010 		if (!skb)
6011 			break;
6012 
6013 		skb_unlink(skb, &chan->srej_q);
6014 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6015 		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
6016 		if (err)
6017 			break;
6018 	}
6019 
6020 	if (skb_queue_empty(&chan->srej_q)) {
6021 		chan->rx_state = L2CAP_RX_STATE_RECV;
6022 		l2cap_send_ack(chan);
6023 	}
6024 
6025 	return err;
6026 }
6027 
6028 static void l2cap_handle_srej(struct l2cap_chan *chan,
6029 			      struct l2cap_ctrl *control)
6030 {
6031 	struct sk_buff *skb;
6032 
6033 	BT_DBG("chan %p, control %p", chan, control);
6034 
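	/* An SREJ requests retransmission of a single I-frame.  A reqseq
	 * equal to next_tx_seq refers to a frame that was never sent, and a
	 * frame already at its retry limit forces a disconnect instead.
	 */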
6035 	if (control->reqseq == chan->next_tx_seq) {
6036 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6037 		l2cap_send_disconn_req(chan, ECONNRESET);
6038 		return;
6039 	}
6040 
6041 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6042 
6043 	if (skb == NULL) {
6044 		BT_DBG("Seq %d not available for retransmission",
6045 		       control->reqseq);
6046 		return;
6047 	}
6048 
6049 	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6050 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6051 		l2cap_send_disconn_req(chan, ECONNRESET);
6052 		return;
6053 	}
6054 
6055 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6056 
6057 	if (control->poll) {
6058 		l2cap_pass_to_tx(chan, control);
6059 
6060 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
6061 		l2cap_retransmit(chan, control);
6062 		l2cap_ertm_send(chan);
6063 
6064 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6065 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
6066 			chan->srej_save_reqseq = control->reqseq;
6067 		}
6068 	} else {
6069 		l2cap_pass_to_tx_fbit(chan, control);
6070 
6071 		if (control->final) {
6072 			if (chan->srej_save_reqseq != control->reqseq ||
6073 			    !test_and_clear_bit(CONN_SREJ_ACT,
6074 						&chan->conn_state))
6075 				l2cap_retransmit(chan, control);
6076 		} else {
6077 			l2cap_retransmit(chan, control);
6078 			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6079 				set_bit(CONN_SREJ_ACT, &chan->conn_state);
6080 				chan->srej_save_reqseq = control->reqseq;
6081 			}
6082 		}
6083 	}
6084 }
6085 
6086 static void l2cap_handle_rej(struct l2cap_chan *chan,
6087 			     struct l2cap_ctrl *control)
6088 {
6089 	struct sk_buff *skb;
6090 
6091 	BT_DBG("chan %p, control %p", chan, control);
6092 
6093 	if (control->reqseq == chan->next_tx_seq) {
6094 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6095 		l2cap_send_disconn_req(chan, ECONNRESET);
6096 		return;
6097 	}
6098 
6099 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6100 
6101 	if (chan->max_tx && skb &&
6102 	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6103 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6104 		l2cap_send_disconn_req(chan, ECONNRESET);
6105 		return;
6106 	}
6107 
6108 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6109 
6110 	l2cap_pass_to_tx(chan, control);
6111 
6112 	if (control->final) {
6113 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6114 			l2cap_retransmit_all(chan, control);
6115 	} else {
6116 		l2cap_retransmit_all(chan, control);
6117 		l2cap_ertm_send(chan);
6118 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6119 			set_bit(CONN_REJ_ACT, &chan->conn_state);
6120 	}
6121 }
6122 
6123 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6124 {
6125 	BT_DBG("chan %p, txseq %d", chan, txseq);
6126 
6127 	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6128 	       chan->expected_tx_seq);
6129 
6130 	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6131 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6132 		    chan->tx_win) {
6133 			/* See notes below regarding "double poll" and
6134 			 * invalid packets.
6135 			 */
6136 			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6137 				BT_DBG("Invalid/Ignore - after SREJ");
6138 				return L2CAP_TXSEQ_INVALID_IGNORE;
6139 			} else {
6140 				BT_DBG("Invalid - in window after SREJ sent");
6141 				return L2CAP_TXSEQ_INVALID;
6142 			}
6143 		}
6144 
6145 		if (chan->srej_list.head == txseq) {
6146 			BT_DBG("Expected SREJ");
6147 			return L2CAP_TXSEQ_EXPECTED_SREJ;
6148 		}
6149 
6150 		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6151 			BT_DBG("Duplicate SREJ - txseq already stored");
6152 			return L2CAP_TXSEQ_DUPLICATE_SREJ;
6153 		}
6154 
6155 		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6156 			BT_DBG("Unexpected SREJ - not requested");
6157 			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6158 		}
6159 	}
6160 
6161 	if (chan->expected_tx_seq == txseq) {
6162 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6163 		    chan->tx_win) {
6164 			BT_DBG("Invalid - txseq outside tx window");
6165 			return L2CAP_TXSEQ_INVALID;
6166 		} else {
6167 			BT_DBG("Expected");
6168 			return L2CAP_TXSEQ_EXPECTED;
6169 		}
6170 	}
6171 
6172 	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6173 	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6174 		BT_DBG("Duplicate - expected_tx_seq later than txseq");
6175 		return L2CAP_TXSEQ_DUPLICATE;
6176 	}
6177 
6178 	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6179 		/* A source of invalid packets is a "double poll" condition,
6180 		 * where delays cause us to send multiple poll packets.  If
6181 		 * the remote stack receives and processes both polls,
6182 		 * sequence numbers can wrap around in such a way that a
6183 		 * resent frame has a sequence number that looks like new data
6184 		 * with a sequence gap.  This would trigger an erroneous SREJ
6185 		 * request.
6186 		 *
6187 		 * Fortunately, this is impossible with a tx window that's
6188 		 * less than half of the maximum sequence number, which allows
6189 		 * invalid frames to be safely ignored.
6190 		 *
6191 		 * With tx window sizes greater than half of the tx window
6192 		 * maximum, the frame is invalid and cannot be ignored.  This
6193 		 * causes a disconnect.
6194 		 */
6195 
6196 		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6197 			BT_DBG("Invalid/Ignore - txseq outside tx window");
6198 			return L2CAP_TXSEQ_INVALID_IGNORE;
6199 		} else {
6200 			BT_DBG("Invalid - txseq outside tx window");
6201 			return L2CAP_TXSEQ_INVALID;
6202 		}
6203 	} else {
6204 		BT_DBG("Unexpected - txseq indicates missing frames");
6205 		return L2CAP_TXSEQ_UNEXPECTED;
6206 	}
6207 }
6208 
6209 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6210 			       struct l2cap_ctrl *control,
6211 			       struct sk_buff *skb, u8 event)
6212 {
6213 	int err = 0;
6214 	bool skb_in_use = false;
6215 
6216 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6217 	       event);
6218 
6219 	switch (event) {
6220 	case L2CAP_EV_RECV_IFRAME:
6221 		switch (l2cap_classify_txseq(chan, control->txseq)) {
6222 		case L2CAP_TXSEQ_EXPECTED:
6223 			l2cap_pass_to_tx(chan, control);
6224 
6225 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6226 				BT_DBG("Busy, discarding expected seq %d",
6227 				       control->txseq);
6228 				break;
6229 			}
6230 
6231 			chan->expected_tx_seq = __next_seq(chan,
6232 							   control->txseq);
6233 
6234 			chan->buffer_seq = chan->expected_tx_seq;
6235 			skb_in_use = true;
6236 
6237 			err = l2cap_reassemble_sdu(chan, skb, control);
6238 			if (err)
6239 				break;
6240 
6241 			if (control->final) {
6242 				if (!test_and_clear_bit(CONN_REJ_ACT,
6243 							&chan->conn_state)) {
6244 					control->final = 0;
6245 					l2cap_retransmit_all(chan, control);
6246 					l2cap_ertm_send(chan);
6247 				}
6248 			}
6249 
6250 			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6251 				l2cap_send_ack(chan);
6252 			break;
6253 		case L2CAP_TXSEQ_UNEXPECTED:
6254 			l2cap_pass_to_tx(chan, control);
6255 
6256 			/* Can't issue SREJ frames in the local busy state.
6257 			 * Drop this frame; it will be seen as missing
6258 			 * when local busy is exited.
6259 			 */
6260 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6261 				BT_DBG("Busy, discarding unexpected seq %d",
6262 				       control->txseq);
6263 				break;
6264 			}
6265 
6266 			/* There was a gap in the sequence, so an SREJ
6267 			 * must be sent for each missing frame.  The
6268 			 * current frame is stored for later use.
6269 			 */
6270 			skb_queue_tail(&chan->srej_q, skb);
6271 			skb_in_use = true;
6272 			BT_DBG("Queued %p (queue len %d)", skb,
6273 			       skb_queue_len(&chan->srej_q));
6274 
6275 			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6276 			l2cap_seq_list_clear(&chan->srej_list);
6277 			l2cap_send_srej(chan, control->txseq);
6278 
6279 			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6280 			break;
6281 		case L2CAP_TXSEQ_DUPLICATE:
6282 			l2cap_pass_to_tx(chan, control);
6283 			break;
6284 		case L2CAP_TXSEQ_INVALID_IGNORE:
6285 			break;
6286 		case L2CAP_TXSEQ_INVALID:
6287 		default:
6288 			l2cap_send_disconn_req(chan, ECONNRESET);
6289 			break;
6290 		}
6291 		break;
6292 	case L2CAP_EV_RECV_RR:
6293 		l2cap_pass_to_tx(chan, control);
6294 		if (control->final) {
6295 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6296 
6297 			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6298 			    !__chan_is_moving(chan)) {
6299 				control->final = 0;
6300 				l2cap_retransmit_all(chan, control);
6301 			}
6302 
6303 			l2cap_ertm_send(chan);
6304 		} else if (control->poll) {
6305 			l2cap_send_i_or_rr_or_rnr(chan);
6306 		} else {
6307 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6308 					       &chan->conn_state) &&
6309 			    chan->unacked_frames)
6310 				__set_retrans_timer(chan);
6311 
6312 			l2cap_ertm_send(chan);
6313 		}
6314 		break;
6315 	case L2CAP_EV_RECV_RNR:
6316 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6317 		l2cap_pass_to_tx(chan, control);
6318 		if (control && control->poll) {
6319 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
6320 			l2cap_send_rr_or_rnr(chan, 0);
6321 		}
6322 		__clear_retrans_timer(chan);
6323 		l2cap_seq_list_clear(&chan->retrans_list);
6324 		break;
6325 	case L2CAP_EV_RECV_REJ:
6326 		l2cap_handle_rej(chan, control);
6327 		break;
6328 	case L2CAP_EV_RECV_SREJ:
6329 		l2cap_handle_srej(chan, control);
6330 		break;
6331 	default:
6332 		break;
6333 	}
6334 
6335 	if (skb && !skb_in_use) {
6336 		BT_DBG("Freeing %p", skb);
6337 		kfree_skb(skb);
6338 	}
6339 
6340 	return err;
6341 }
6342 
6343 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6344 				    struct l2cap_ctrl *control,
6345 				    struct sk_buff *skb, u8 event)
6346 {
6347 	int err = 0;
6348 	u16 txseq = control->txseq;
6349 	bool skb_in_use = false;
6350 
6351 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6352 	       event);
6353 
6354 	switch (event) {
6355 	case L2CAP_EV_RECV_IFRAME:
6356 		switch (l2cap_classify_txseq(chan, txseq)) {
6357 		case L2CAP_TXSEQ_EXPECTED:
6358 			/* Keep frame for reassembly later */
6359 			l2cap_pass_to_tx(chan, control);
6360 			skb_queue_tail(&chan->srej_q, skb);
6361 			skb_in_use = true;
6362 			BT_DBG("Queued %p (queue len %d)", skb,
6363 			       skb_queue_len(&chan->srej_q));
6364 
6365 			chan->expected_tx_seq = __next_seq(chan, txseq);
6366 			break;
6367 		case L2CAP_TXSEQ_EXPECTED_SREJ:
6368 			l2cap_seq_list_pop(&chan->srej_list);
6369 
6370 			l2cap_pass_to_tx(chan, control);
6371 			skb_queue_tail(&chan->srej_q, skb);
6372 			skb_in_use = true;
6373 			BT_DBG("Queued %p (queue len %d)", skb,
6374 			       skb_queue_len(&chan->srej_q));
6375 
6376 			err = l2cap_rx_queued_iframes(chan);
6377 			break;
6381 		case L2CAP_TXSEQ_UNEXPECTED:
6382 			/* Got a frame that can't be reassembled yet.
6383 			 * Save it for later, and send SREJs to cover
6384 			 * the missing frames.
6385 			 */
6386 			skb_queue_tail(&chan->srej_q, skb);
6387 			skb_in_use = true;
6388 			BT_DBG("Queued %p (queue len %d)", skb,
6389 			       skb_queue_len(&chan->srej_q));
6390 
6391 			l2cap_pass_to_tx(chan, control);
6392 			l2cap_send_srej(chan, control->txseq);
6393 			break;
6394 		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6395 			/* This frame was requested with an SREJ, but
6396 			 * some expected retransmitted frames are
6397 			 * missing.  Request retransmission of missing
6398 			 * SREJ'd frames.
6399 			 */
6400 			skb_queue_tail(&chan->srej_q, skb);
6401 			skb_in_use = true;
6402 			BT_DBG("Queued %p (queue len %d)", skb,
6403 			       skb_queue_len(&chan->srej_q));
6404 
6405 			l2cap_pass_to_tx(chan, control);
6406 			l2cap_send_srej_list(chan, control->txseq);
6407 			break;
6408 		case L2CAP_TXSEQ_DUPLICATE_SREJ:
6409 			/* We've already queued this frame.  Drop this copy. */
6410 			l2cap_pass_to_tx(chan, control);
6411 			break;
6412 		case L2CAP_TXSEQ_DUPLICATE:
6413 			/* Expecting a later sequence number, so this frame
6414 			 * was already received.  Ignore it completely.
6415 			 */
6416 			break;
6417 		case L2CAP_TXSEQ_INVALID_IGNORE:
6418 			break;
6419 		case L2CAP_TXSEQ_INVALID:
6420 		default:
6421 			l2cap_send_disconn_req(chan, ECONNRESET);
6422 			break;
6423 		}
6424 		break;
6425 	case L2CAP_EV_RECV_RR:
6426 		l2cap_pass_to_tx(chan, control);
6427 		if (control->final) {
6428 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6429 
6430 			if (!test_and_clear_bit(CONN_REJ_ACT,
6431 						&chan->conn_state)) {
6432 				control->final = 0;
6433 				l2cap_retransmit_all(chan, control);
6434 			}
6435 
6436 			l2cap_ertm_send(chan);
6437 		} else if (control->poll) {
6438 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6439 					       &chan->conn_state) &&
6440 			    chan->unacked_frames) {
6441 				__set_retrans_timer(chan);
6442 			}
6443 
6444 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
6445 			l2cap_send_srej_tail(chan);
6446 		} else {
6447 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6448 					       &chan->conn_state) &&
6449 			    chan->unacked_frames)
6450 				__set_retrans_timer(chan);
6451 
6452 			l2cap_send_ack(chan);
6453 		}
6454 		break;
6455 	case L2CAP_EV_RECV_RNR:
6456 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6457 		l2cap_pass_to_tx(chan, control);
6458 		if (control->poll) {
6459 			l2cap_send_srej_tail(chan);
6460 		} else {
6461 			struct l2cap_ctrl rr_control;
6462 			memset(&rr_control, 0, sizeof(rr_control));
6463 			rr_control.sframe = 1;
6464 			rr_control.super = L2CAP_SUPER_RR;
6465 			rr_control.reqseq = chan->buffer_seq;
6466 			l2cap_send_sframe(chan, &rr_control);
6467 		}
6468 
6469 		break;
6470 	case L2CAP_EV_RECV_REJ:
6471 		l2cap_handle_rej(chan, control);
6472 		break;
6473 	case L2CAP_EV_RECV_SREJ:
6474 		l2cap_handle_srej(chan, control);
6475 		break;
6476 	}
6477 
6478 	if (skb && !skb_in_use) {
6479 		BT_DBG("Freeing %p", skb);
6480 		kfree_skb(skb);
6481 	}
6482 
6483 	return err;
6484 }
6485 
6486 static int l2cap_finish_move(struct l2cap_chan *chan)
6487 {
6488 	BT_DBG("chan %p", chan);
6489 
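	/* The channel has finished moving to its new controller: resume
	 * normal reception and pick up the MTU of whichever link now
	 * carries the channel (AMP block MTU or ACL MTU).
	 */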
6490 	chan->rx_state = L2CAP_RX_STATE_RECV;
6491 
6492 	if (chan->hs_hcon)
6493 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6494 	else
6495 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6496 
6497 	return l2cap_resegment(chan);
6498 }
6499 
6500 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6501 				 struct l2cap_ctrl *control,
6502 				 struct sk_buff *skb, u8 event)
6503 {
6504 	int err;
6505 
6506 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6507 	       event);
6508 
6509 	if (!control->poll)
6510 		return -EPROTO;
6511 
6512 	l2cap_process_reqseq(chan, control->reqseq);
6513 
6514 	if (!skb_queue_empty(&chan->tx_q))
6515 		chan->tx_send_head = skb_peek(&chan->tx_q);
6516 	else
6517 		chan->tx_send_head = NULL;
6518 
6519 	/* Rewind next_tx_seq to the point expected
6520 	 * by the receiver.
6521 	 */
6522 	chan->next_tx_seq = control->reqseq;
6523 	chan->unacked_frames = 0;
6524 
6525 	err = l2cap_finish_move(chan);
6526 	if (err)
6527 		return err;
6528 
6529 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
6530 	l2cap_send_i_or_rr_or_rnr(chan);
6531 
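	/* The poll we were waiting for can only be carried by an S-frame;
	 * an I-frame event here is a protocol error.  Otherwise let the
	 * normal receive state handler process the S-frame event.
	 */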
6532 	if (event == L2CAP_EV_RECV_IFRAME)
6533 		return -EPROTO;
6534 
6535 	return l2cap_rx_state_recv(chan, control, NULL, event);
6536 }
6537 
6538 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6539 				 struct l2cap_ctrl *control,
6540 				 struct sk_buff *skb, u8 event)
6541 {
6542 	int err;
6543 
6544 	if (!control->final)
6545 		return -EPROTO;
6546 
6547 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6548 
6549 	chan->rx_state = L2CAP_RX_STATE_RECV;
6550 	l2cap_process_reqseq(chan, control->reqseq);
6551 
6552 	if (!skb_queue_empty(&chan->tx_q))
6553 		chan->tx_send_head = skb_peek(&chan->tx_q);
6554 	else
6555 		chan->tx_send_head = NULL;
6556 
6557 	/* Rewind next_tx_seq to the point expected
6558 	 * by the receiver.
6559 	 */
6560 	chan->next_tx_seq = control->reqseq;
6561 	chan->unacked_frames = 0;
6562 
6563 	if (chan->hs_hcon)
6564 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6565 	else
6566 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6567 
6568 	err = l2cap_resegment(chan);
6569 
6570 	if (!err)
6571 		err = l2cap_rx_state_recv(chan, control, skb, event);
6572 
6573 	return err;
6574 }
6575 
6576 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6577 {
6578 	/* Make sure reqseq is for a packet that has been sent but not acked */
6579 	u16 unacked;
6580 
6581 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6582 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6583 }
6584 
6585 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6586 		    struct sk_buff *skb, u8 event)
6587 {
6588 	int err = 0;
6589 
6590 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6591 	       control, skb, event, chan->rx_state);
6592 
6593 	if (__valid_reqseq(chan, control->reqseq)) {
6594 		switch (chan->rx_state) {
6595 		case L2CAP_RX_STATE_RECV:
6596 			err = l2cap_rx_state_recv(chan, control, skb, event);
6597 			break;
6598 		case L2CAP_RX_STATE_SREJ_SENT:
6599 			err = l2cap_rx_state_srej_sent(chan, control, skb,
6600 						       event);
6601 			break;
6602 		case L2CAP_RX_STATE_WAIT_P:
6603 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
6604 			break;
6605 		case L2CAP_RX_STATE_WAIT_F:
6606 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
6607 			break;
6608 		default:
6609 			/* shut it down */
6610 			break;
6611 		}
6612 	} else {
6613 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
6614 		       control->reqseq, chan->next_tx_seq,
6615 		       chan->expected_ack_seq);
6616 		l2cap_send_disconn_req(chan, ECONNRESET);
6617 	}
6618 
6619 	return err;
6620 }
6621 
6622 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6623 			   struct sk_buff *skb)
6624 {
6625 	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6626 	       chan->rx_state);
6627 
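	/* Streaming mode never retransmits: only the expected txseq is
	 * reassembled, any other frame discards the partial SDU and is
	 * dropped itself.
	 */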
6628 	if (l2cap_classify_txseq(chan, control->txseq) ==
6629 	    L2CAP_TXSEQ_EXPECTED) {
6630 		l2cap_pass_to_tx(chan, control);
6631 
6632 		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6633 		       __next_seq(chan, chan->buffer_seq));
6634 
6635 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6636 
6637 		l2cap_reassemble_sdu(chan, skb, control);
6638 	} else {
6639 		if (chan->sdu) {
6640 			kfree_skb(chan->sdu);
6641 			chan->sdu = NULL;
6642 		}
6643 		chan->sdu_last_frag = NULL;
6644 		chan->sdu_len = 0;
6645 
6646 		if (skb) {
6647 			BT_DBG("Freeing %p", skb);
6648 			kfree_skb(skb);
6649 		}
6650 	}
6651 
6652 	chan->last_acked_seq = control->txseq;
6653 	chan->expected_tx_seq = __next_seq(chan, control->txseq);
6654 
6655 	return 0;
6656 }
6657 
6658 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6659 {
6660 	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
6661 	u16 len;
6662 	u8 event;
6663 
6664 	__unpack_control(chan, skb);
6665 
6666 	len = skb->len;
6667 
6668 	/*
6669 	 * We can just drop the corrupted I-frame here.
6670 	 * The receiver will miss it, start the proper recovery
6671 	 * procedure and ask for retransmission.
6672 	 */
6673 	if (l2cap_check_fcs(chan, skb))
6674 		goto drop;
6675 
6676 	if (!control->sframe && control->sar == L2CAP_SAR_START)
6677 		len -= L2CAP_SDULEN_SIZE;
6678 
6679 	if (chan->fcs == L2CAP_FCS_CRC16)
6680 		len -= L2CAP_FCS_SIZE;
6681 
6682 	if (len > chan->mps) {
6683 		l2cap_send_disconn_req(chan, ECONNRESET);
6684 		goto drop;
6685 	}
6686 
6687 	if ((chan->mode == L2CAP_MODE_ERTM ||
6688 	     chan->mode == L2CAP_MODE_STREAMING) && sk_filter(chan->data, skb))
6689 		goto drop;
6690 
6691 	if (!control->sframe) {
6692 		int err;
6693 
6694 		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6695 		       control->sar, control->reqseq, control->final,
6696 		       control->txseq);
6697 
6698 		/* Validate F-bit - F=0 always valid, F=1 only
6699 		 * valid in TX WAIT_F
6700 		 */
6701 		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6702 			goto drop;
6703 
6704 		if (chan->mode != L2CAP_MODE_STREAMING) {
6705 			event = L2CAP_EV_RECV_IFRAME;
6706 			err = l2cap_rx(chan, control, skb, event);
6707 		} else {
6708 			err = l2cap_stream_rx(chan, control, skb);
6709 		}
6710 
6711 		if (err)
6712 			l2cap_send_disconn_req(chan, ECONNRESET);
6713 	} else {
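		/* Map the S-frame supervisory function (RR/REJ/RNR/SREJ)
		 * onto the corresponding receive event.
		 */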
6714 		const u8 rx_func_to_event[4] = {
6715 			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6716 			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6717 		};
6718 
6719 		/* Only I-frames are expected in streaming mode */
6720 		if (chan->mode == L2CAP_MODE_STREAMING)
6721 			goto drop;
6722 
6723 		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6724 		       control->reqseq, control->final, control->poll,
6725 		       control->super);
6726 
6727 		if (len != 0) {
6728 			BT_ERR("Trailing bytes: %d in sframe", len);
6729 			l2cap_send_disconn_req(chan, ECONNRESET);
6730 			goto drop;
6731 		}
6732 
6733 		/* Validate F and P bits */
6734 		if (control->final && (control->poll ||
6735 				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6736 			goto drop;
6737 
6738 		event = rx_func_to_event[control->super];
6739 		if (l2cap_rx(chan, control, skb, event))
6740 			l2cap_send_disconn_req(chan, ECONNRESET);
6741 	}
6742 
6743 	return 0;
6744 
6745 drop:
6746 	kfree_skb(skb);
6747 	return 0;
6748 }
6749 
6750 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6751 {
6752 	struct l2cap_conn *conn = chan->conn;
6753 	struct l2cap_le_credits pkt;
6754 	u16 return_credits;
6755 
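	/* Keep the peer topped up to (imtu / mps) + 1 credits; only the
	 * difference from what it currently holds is returned.
	 */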
6756 	return_credits = ((chan->imtu / chan->mps) + 1) - chan->rx_credits;
6757 
6758 	if (!return_credits)
6759 		return;
6760 
6761 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6762 
6763 	chan->rx_credits += return_credits;
6764 
6765 	pkt.cid     = cpu_to_le16(chan->scid);
6766 	pkt.credits = cpu_to_le16(return_credits);
6767 
6768 	chan->ident = l2cap_get_ident(conn);
6769 
6770 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6771 }
6772 
6773 static int l2cap_le_recv(struct l2cap_chan *chan, struct sk_buff *skb)
6774 {
6775 	int err;
6776 
6777 	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
6778 
6779 	/* Wait for recv to confirm reception before updating the credits */
6780 	err = chan->ops->recv(chan, skb);
6781 
6782 	/* Update credits whenever an SDU is received */
6783 	l2cap_chan_le_send_credits(chan);
6784 
6785 	return err;
6786 }
6787 
6788 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6789 {
6790 	int err;
6791 
6792 	if (!chan->rx_credits) {
6793 		BT_ERR("No credits to receive LE L2CAP data");
6794 		l2cap_send_disconn_req(chan, ECONNRESET);
6795 		return -ENOBUFS;
6796 	}
6797 
6798 	if (chan->imtu < skb->len) {
6799 		BT_ERR("Too big LE L2CAP PDU");
6800 		return -ENOBUFS;
6801 	}
6802 
6803 	chan->rx_credits--;
6804 	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6805 
6806 	/* Update if the remote had run out of credits; this should only
6807 	 * happen if the remote is not using the entire MPS.
6808 	 */
6809 	if (!chan->rx_credits)
6810 		l2cap_chan_le_send_credits(chan);
6811 
6812 	err = 0;
6813 
6814 	if (!chan->sdu) {
6815 		u16 sdu_len;
6816 
6817 		sdu_len = get_unaligned_le16(skb->data);
6818 		skb_pull(skb, L2CAP_SDULEN_SIZE);
6819 
6820 		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6821 		       sdu_len, skb->len, chan->imtu);
6822 
6823 		if (sdu_len > chan->imtu) {
6824 			BT_ERR("Too big LE L2CAP SDU length received");
6825 			err = -EMSGSIZE;
6826 			goto failed;
6827 		}
6828 
6829 		if (skb->len > sdu_len) {
6830 			BT_ERR("Too much LE L2CAP data received");
6831 			err = -EINVAL;
6832 			goto failed;
6833 		}
6834 
6835 		if (skb->len == sdu_len)
6836 			return l2cap_le_recv(chan, skb);
6837 
6838 		chan->sdu = skb;
6839 		chan->sdu_len = sdu_len;
6840 		chan->sdu_last_frag = skb;
6841 
6842 		/* Detect if remote is not able to use the selected MPS */
6843 		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
6844 			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
6845 
6846 			/* Adjust the number of credits */
6847 			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
6848 			chan->mps = mps_len;
6849 			l2cap_chan_le_send_credits(chan);
6850 		}
6851 
6852 		return 0;
6853 	}
6854 
6855 	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6856 	       chan->sdu->len, skb->len, chan->sdu_len);
6857 
6858 	if (chan->sdu->len + skb->len > chan->sdu_len) {
6859 		BT_ERR("Too much LE L2CAP data received");
6860 		err = -EINVAL;
6861 		goto failed;
6862 	}
6863 
6864 	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6865 	skb = NULL;
6866 
6867 	if (chan->sdu->len == chan->sdu_len) {
6868 		err = l2cap_le_recv(chan, chan->sdu);
6869 		if (!err) {
6870 			chan->sdu = NULL;
6871 			chan->sdu_last_frag = NULL;
6872 			chan->sdu_len = 0;
6873 		}
6874 	}
6875 
6876 failed:
6877 	if (err) {
6878 		kfree_skb(skb);
6879 		kfree_skb(chan->sdu);
6880 		chan->sdu = NULL;
6881 		chan->sdu_last_frag = NULL;
6882 		chan->sdu_len = 0;
6883 	}
6884 
6885 	/* We can't return an error here since we took care of the skb
6886 	 * freeing internally. An error return would cause the caller to
6887 	 * do a double-free of the skb.
6888 	 */
6889 	return 0;
6890 }
6891 
6892 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6893 			       struct sk_buff *skb)
6894 {
6895 	struct l2cap_chan *chan;
6896 
6897 	chan = l2cap_get_chan_by_scid(conn, cid);
6898 	if (!chan) {
6899 		if (cid == L2CAP_CID_A2MP) {
6900 			chan = a2mp_channel_create(conn, skb);
6901 			if (!chan) {
6902 				kfree_skb(skb);
6903 				return;
6904 			}
6905 
6906 			l2cap_chan_lock(chan);
6907 		} else {
6908 			BT_DBG("unknown cid 0x%4.4x", cid);
6909 			/* Drop packet and return */
6910 			kfree_skb(skb);
6911 			return;
6912 		}
6913 	}
6914 
6915 	BT_DBG("chan %p, len %d", chan, skb->len);
6916 
6917 	/* If we receive data on a fixed channel before the info req/rsp
6918 	 * procedure is done, simply assume that the channel is supported
6919 	 * and mark it as ready.
6920 	 */
6921 	if (chan->chan_type == L2CAP_CHAN_FIXED)
6922 		l2cap_chan_ready(chan);
6923 
6924 	if (chan->state != BT_CONNECTED)
6925 		goto drop;
6926 
6927 	switch (chan->mode) {
6928 	case L2CAP_MODE_LE_FLOWCTL:
6929 		if (l2cap_le_data_rcv(chan, skb) < 0)
6930 			goto drop;
6931 
6932 		goto done;
6933 
6934 	case L2CAP_MODE_BASIC:
6935 		/* If the socket recv buffer overflows we drop data here,
6936 		 * which is *bad* because L2CAP has to be reliable.
6937 		 * But we don't have any other choice: L2CAP doesn't
6938 		 * provide a flow control mechanism. */
6939 
6940 		if (chan->imtu < skb->len) {
6941 			BT_ERR("Dropping L2CAP data: receive buffer overflow");
6942 			goto drop;
6943 		}
6944 
6945 		if (!chan->ops->recv(chan, skb))
6946 			goto done;
6947 		break;
6948 
6949 	case L2CAP_MODE_ERTM:
6950 	case L2CAP_MODE_STREAMING:
6951 		l2cap_data_rcv(chan, skb);
6952 		goto done;
6953 
6954 	default:
6955 		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6956 		break;
6957 	}
6958 
6959 drop:
6960 	kfree_skb(skb);
6961 
6962 done:
6963 	l2cap_chan_unlock(chan);
6964 }
6965 
6966 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6967 				  struct sk_buff *skb)
6968 {
6969 	struct hci_conn *hcon = conn->hcon;
6970 	struct l2cap_chan *chan;
6971 
6972 	if (hcon->type != ACL_LINK)
6973 		goto free_skb;
6974 
6975 	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6976 					ACL_LINK);
6977 	if (!chan)
6978 		goto free_skb;
6979 
6980 	BT_DBG("chan %p, len %d", chan, skb->len);
6981 
6982 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6983 		goto drop;
6984 
6985 	if (chan->imtu < skb->len)
6986 		goto drop;
6987 
6988 	/* Store remote BD_ADDR and PSM for msg_name */
6989 	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
6990 	bt_cb(skb)->l2cap.psm = psm;
6991 
6992 	if (!chan->ops->recv(chan, skb)) {
6993 		l2cap_chan_put(chan);
6994 		return;
6995 	}
6996 
6997 drop:
6998 	l2cap_chan_put(chan);
6999 free_skb:
7000 	kfree_skb(skb);
7001 }
7002 
7003 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7004 {
7005 	struct l2cap_hdr *lh = (void *) skb->data;
7006 	struct hci_conn *hcon = conn->hcon;
7007 	u16 cid, len;
7008 	__le16 psm;
7009 
7010 	if (hcon->state != BT_CONNECTED) {
7011 		BT_DBG("queueing pending rx skb");
7012 		skb_queue_tail(&conn->pending_rx, skb);
7013 		return;
7014 	}
7015 
7016 	skb_pull(skb, L2CAP_HDR_SIZE);
7017 	cid = __le16_to_cpu(lh->cid);
7018 	len = __le16_to_cpu(lh->len);
7019 
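	/* The length in the basic header must match the payload that
	 * actually arrived; otherwise drop the frame.
	 */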
7020 	if (len != skb->len) {
7021 		kfree_skb(skb);
7022 		return;
7023 	}
7024 
7025 	/* Since we can't actively block incoming LE connections, we must
7026 	 * at least ensure that we ignore incoming data from them.
7027 	 */
7028 	if (hcon->type == LE_LINK &&
7029 	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
7030 				   bdaddr_dst_type(hcon))) {
7031 		kfree_skb(skb);
7032 		return;
7033 	}
7034 
7035 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
7036 
7037 	switch (cid) {
7038 	case L2CAP_CID_SIGNALING:
7039 		l2cap_sig_channel(conn, skb);
7040 		break;
7041 
7042 	case L2CAP_CID_CONN_LESS:
7043 		psm = get_unaligned((__le16 *) skb->data);
7044 		skb_pull(skb, L2CAP_PSMLEN_SIZE);
7045 		l2cap_conless_channel(conn, psm, skb);
7046 		break;
7047 
7048 	case L2CAP_CID_LE_SIGNALING:
7049 		l2cap_le_sig_channel(conn, skb);
7050 		break;
7051 
7052 	default:
7053 		l2cap_data_channel(conn, cid, skb);
7054 		break;
7055 	}
7056 }
7057 
7058 static void process_pending_rx(struct work_struct *work)
7059 {
7060 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7061 					       pending_rx_work);
7062 	struct sk_buff *skb;
7063 
7064 	BT_DBG("");
7065 
7066 	while ((skb = skb_dequeue(&conn->pending_rx)))
7067 		l2cap_recv_frame(conn, skb);
7068 }
7069 
7070 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7071 {
7072 	struct l2cap_conn *conn = hcon->l2cap_data;
7073 	struct hci_chan *hchan;
7074 
7075 	if (conn)
7076 		return conn;
7077 
7078 	hchan = hci_chan_create(hcon);
7079 	if (!hchan)
7080 		return NULL;
7081 
7082 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
7083 	if (!conn) {
7084 		hci_chan_del(hchan);
7085 		return NULL;
7086 	}
7087 
7088 	kref_init(&conn->ref);
7089 	hcon->l2cap_data = conn;
7090 	conn->hcon = hci_conn_get(hcon);
7091 	conn->hchan = hchan;
7092 
7093 	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
7094 
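	/* Use the controller's LE MTU for LE links when it reports one;
	 * otherwise fall back to the ACL MTU.
	 */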
7095 	switch (hcon->type) {
7096 	case LE_LINK:
7097 		if (hcon->hdev->le_mtu) {
7098 			conn->mtu = hcon->hdev->le_mtu;
7099 			break;
7100 		}
7101 		/* fall through */
7102 	default:
7103 		conn->mtu = hcon->hdev->acl_mtu;
7104 		break;
7105 	}
7106 
7107 	conn->feat_mask = 0;
7108 
7109 	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7110 
7111 	if (hcon->type == ACL_LINK &&
7112 	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7113 		conn->local_fixed_chan |= L2CAP_FC_A2MP;
7114 
7115 	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7116 	    (bredr_sc_enabled(hcon->hdev) ||
7117 	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7118 		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7119 
7120 	mutex_init(&conn->ident_lock);
7121 	mutex_init(&conn->chan_lock);
7122 
7123 	INIT_LIST_HEAD(&conn->chan_l);
7124 	INIT_LIST_HEAD(&conn->users);
7125 
7126 	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7127 
7128 	skb_queue_head_init(&conn->pending_rx);
7129 	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7130 	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7131 
7132 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
7133 
7134 	return conn;
7135 }
7136 
7137 static bool is_valid_psm(u16 psm, u8 dst_type) {
7138 	if (!psm)
7139 		return false;
7140 
7141 	if (bdaddr_type_is_le(dst_type))
7142 		return (psm <= 0x00ff);
7143 
7144 	/* PSM must be odd and lsb of upper byte must be 0 */
7145 	return ((psm & 0x0101) == 0x0001);
7146 }
7147 
7148 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7149 		       bdaddr_t *dst, u8 dst_type)
7150 {
7151 	struct l2cap_conn *conn;
7152 	struct hci_conn *hcon;
7153 	struct hci_dev *hdev;
7154 	int err;
7155 
7156 	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
7157 	       dst_type, __le16_to_cpu(psm));
7158 
7159 	hdev = hci_get_route(dst, &chan->src, chan->src_type);
7160 	if (!hdev)
7161 		return -EHOSTUNREACH;
7162 
7163 	hci_dev_lock(hdev);
7164 
7165 	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7166 	    chan->chan_type != L2CAP_CHAN_RAW) {
7167 		err = -EINVAL;
7168 		goto done;
7169 	}
7170 
7171 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7172 		err = -EINVAL;
7173 		goto done;
7174 	}
7175 
7176 	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7177 		err = -EINVAL;
7178 		goto done;
7179 	}
7180 
7181 	switch (chan->mode) {
7182 	case L2CAP_MODE_BASIC:
7183 		break;
7184 	case L2CAP_MODE_LE_FLOWCTL:
7185 		break;
7186 	case L2CAP_MODE_ERTM:
7187 	case L2CAP_MODE_STREAMING:
7188 		if (!disable_ertm)
7189 			break;
7190 		/* fall through */
7191 	default:
7192 		err = -EOPNOTSUPP;
7193 		goto done;
7194 	}
7195 
7196 	switch (chan->state) {
7197 	case BT_CONNECT:
7198 	case BT_CONNECT2:
7199 	case BT_CONFIG:
7200 		/* Already connecting */
7201 		err = 0;
7202 		goto done;
7203 
7204 	case BT_CONNECTED:
7205 		/* Already connected */
7206 		err = -EISCONN;
7207 		goto done;
7208 
7209 	case BT_OPEN:
7210 	case BT_BOUND:
7211 		/* Can connect */
7212 		break;
7213 
7214 	default:
7215 		err = -EBADFD;
7216 		goto done;
7217 	}
7218 
7219 	/* Set destination address and psm */
7220 	bacpy(&chan->dst, dst);
7221 	chan->dst_type = dst_type;
7222 
7223 	chan->psm = psm;
7224 	chan->dcid = cid;
7225 
7226 	if (bdaddr_type_is_le(dst_type)) {
7227 		/* Convert from L2CAP channel address type to HCI address type
7228 		 */
7229 		if (dst_type == BDADDR_LE_PUBLIC)
7230 			dst_type = ADDR_LE_DEV_PUBLIC;
7231 		else
7232 			dst_type = ADDR_LE_DEV_RANDOM;
7233 
7234 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7235 			hcon = hci_connect_le(hdev, dst, dst_type,
7236 					      chan->sec_level,
7237 					      HCI_LE_CONN_TIMEOUT,
7238 					      HCI_ROLE_SLAVE, NULL);
7239 		else
7240 			hcon = hci_connect_le_scan(hdev, dst, dst_type,
7241 						   chan->sec_level,
7242 						   HCI_LE_CONN_TIMEOUT);
7243 
7244 	} else {
7245 		u8 auth_type = l2cap_get_auth_type(chan);
7246 		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
7247 	}
7248 
7249 	if (IS_ERR(hcon)) {
7250 		err = PTR_ERR(hcon);
7251 		goto done;
7252 	}
7253 
7254 	conn = l2cap_conn_add(hcon);
7255 	if (!conn) {
7256 		hci_conn_drop(hcon);
7257 		err = -ENOMEM;
7258 		goto done;
7259 	}
7260 
7261 	mutex_lock(&conn->chan_lock);
7262 	l2cap_chan_lock(chan);
7263 
7264 	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7265 		hci_conn_drop(hcon);
7266 		err = -EBUSY;
7267 		goto chan_unlock;
7268 	}
7269 
7270 	/* Update source addr of the socket */
7271 	bacpy(&chan->src, &hcon->src);
7272 	chan->src_type = bdaddr_src_type(hcon);
7273 
7274 	__l2cap_chan_add(conn, chan);
7275 
7276 	/* l2cap_chan_add takes its own ref so we can drop this one */
7277 	hci_conn_drop(hcon);
7278 
7279 	l2cap_state_change(chan, BT_CONNECT);
7280 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7281 
7282 	/* Release chan->sport so that it can be reused by other
7283 	 * sockets (as it's only used for listening sockets).
7284 	 */
7285 	write_lock(&chan_list_lock);
7286 	chan->sport = 0;
7287 	write_unlock(&chan_list_lock);
7288 
7289 	if (hcon->state == BT_CONNECTED) {
7290 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7291 			__clear_chan_timer(chan);
7292 			if (l2cap_chan_check_security(chan, true))
7293 				l2cap_state_change(chan, BT_CONNECTED);
7294 		} else
7295 			l2cap_do_start(chan);
7296 	}
7297 
7298 	err = 0;
7299 
7300 chan_unlock:
7301 	l2cap_chan_unlock(chan);
7302 	mutex_unlock(&conn->chan_lock);
7303 done:
7304 	hci_dev_unlock(hdev);
7305 	hci_dev_put(hdev);
7306 	return err;
7307 }
7308 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7309 
7310 /* ---- L2CAP interface with lower layer (HCI) ---- */
7311 
7312 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7313 {
7314 	int exact = 0, lm1 = 0, lm2 = 0;
7315 	struct l2cap_chan *c;
7316 
7317 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7318 
7319 	/* Find listening sockets and check their link_mode */
7320 	read_lock(&chan_list_lock);
7321 	list_for_each_entry(c, &chan_list, global_l) {
7322 		if (c->state != BT_LISTEN)
7323 			continue;
7324 
7325 		if (!bacmp(&c->src, &hdev->bdaddr)) {
7326 			lm1 |= HCI_LM_ACCEPT;
7327 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7328 				lm1 |= HCI_LM_MASTER;
7329 			exact++;
7330 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
7331 			lm2 |= HCI_LM_ACCEPT;
7332 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7333 				lm2 |= HCI_LM_MASTER;
7334 		}
7335 	}
7336 	read_unlock(&chan_list_lock);
7337 
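	/* Prefer listeners bound to this adapter's own address; fall back
	 * to wildcard (BDADDR_ANY) listeners only when there is no exact
	 * match.
	 */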
7338 	return exact ? lm1 : lm2;
7339 }
7340 
7341 /* Find the next fixed channel in BT_LISTEN state, continuing iteration
7342  * from an existing channel in the list or from the beginning of the
7343  * global list (by passing NULL as first parameter).
7344  */
7345 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
7346 						  struct hci_conn *hcon)
7347 {
7348 	u8 src_type = bdaddr_src_type(hcon);
7349 
7350 	read_lock(&chan_list_lock);
7351 
7352 	if (c)
7353 		c = list_next_entry(c, global_l);
7354 	else
7355 		c = list_entry(chan_list.next, typeof(*c), global_l);
7356 
7357 	list_for_each_entry_from(c, &chan_list, global_l) {
7358 		if (c->chan_type != L2CAP_CHAN_FIXED)
7359 			continue;
7360 		if (c->state != BT_LISTEN)
7361 			continue;
7362 		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
7363 			continue;
7364 		if (src_type != c->src_type)
7365 			continue;
7366 
7367 		l2cap_chan_hold(c);
7368 		read_unlock(&chan_list_lock);
7369 		return c;
7370 	}
7371 
7372 	read_unlock(&chan_list_lock);
7373 
7374 	return NULL;
7375 }
7376 
7377 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7378 {
7379 	struct hci_dev *hdev = hcon->hdev;
7380 	struct l2cap_conn *conn;
7381 	struct l2cap_chan *pchan;
7382 	u8 dst_type;
7383 
7384 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7385 		return;
7386 
7387 	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7388 
7389 	if (status) {
7390 		l2cap_conn_del(hcon, bt_to_errno(status));
7391 		return;
7392 	}
7393 
7394 	conn = l2cap_conn_add(hcon);
7395 	if (!conn)
7396 		return;
7397 
7398 	dst_type = bdaddr_dst_type(hcon);
7399 
7400 	/* If device is blocked, do not create channels for it */
7401 	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
7402 		return;
7403 
7404 	/* Find fixed channels and notify them of the new connection. We
7405 	 * use multiple individual lookups, continuing each time where
7406 	 * we left off, because the list lock would prevent calling the
7407 	 * potentially sleeping l2cap_chan_lock() function.
7408 	 */
7409 	pchan = l2cap_global_fixed_chan(NULL, hcon);
7410 	while (pchan) {
7411 		struct l2cap_chan *chan, *next;
7412 
7413 		/* Client fixed channels should override server ones */
7414 		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
7415 			goto next;
7416 
7417 		l2cap_chan_lock(pchan);
7418 		chan = pchan->ops->new_connection(pchan);
7419 		if (chan) {
7420 			bacpy(&chan->src, &hcon->src);
7421 			bacpy(&chan->dst, &hcon->dst);
7422 			chan->src_type = bdaddr_src_type(hcon);
7423 			chan->dst_type = dst_type;
7424 
7425 			__l2cap_chan_add(conn, chan);
7426 		}
7427 
7428 		l2cap_chan_unlock(pchan);
7429 next:
7430 		next = l2cap_global_fixed_chan(pchan, hcon);
7431 		l2cap_chan_put(pchan);
7432 		pchan = next;
7433 	}
7434 
7435 	l2cap_conn_ready(conn);
7436 }
7437 
7438 int l2cap_disconn_ind(struct hci_conn *hcon)
7439 {
7440 	struct l2cap_conn *conn = hcon->l2cap_data;
7441 
7442 	BT_DBG("hcon %p", hcon);
7443 
7444 	if (!conn)
7445 		return HCI_ERROR_REMOTE_USER_TERM;
7446 	return conn->disc_reason;
7447 }
7448 
7449 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7450 {
7451 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7452 		return;
7453 
7454 	BT_DBG("hcon %p reason %d", hcon, reason);
7455 
7456 	l2cap_conn_del(hcon, bt_to_errno(reason));
7457 }
7458 
7459 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7460 {
7461 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7462 		return;
7463 
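	/* Encryption dropped: medium-security channels get a grace timer,
	 * high/FIPS channels are closed immediately.  When encryption is
	 * (re)enabled, cancel the pending timer for medium security.
	 */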
7464 	if (encrypt == 0x00) {
7465 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
7466 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7467 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
7468 			   chan->sec_level == BT_SECURITY_FIPS)
7469 			l2cap_chan_close(chan, ECONNREFUSED);
7470 	} else {
7471 		if (chan->sec_level == BT_SECURITY_MEDIUM)
7472 			__clear_chan_timer(chan);
7473 	}
7474 }
7475 
7476 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7477 {
7478 	struct l2cap_conn *conn = hcon->l2cap_data;
7479 	struct l2cap_chan *chan;
7480 
7481 	if (!conn)
7482 		return;
7483 
7484 	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7485 
7486 	mutex_lock(&conn->chan_lock);
7487 
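	/* Walk every channel on this link and advance its state machine
	 * according to the new encryption status.
	 */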
7488 	list_for_each_entry(chan, &conn->chan_l, list) {
7489 		l2cap_chan_lock(chan);
7490 
7491 		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7492 		       state_to_string(chan->state));
7493 
7494 		if (chan->scid == L2CAP_CID_A2MP) {
7495 			l2cap_chan_unlock(chan);
7496 			continue;
7497 		}
7498 
7499 		if (!status && encrypt)
7500 			chan->sec_level = hcon->sec_level;
7501 
7502 		if (!__l2cap_no_conn_pending(chan)) {
7503 			l2cap_chan_unlock(chan);
7504 			continue;
7505 		}
7506 
7507 		if (!status && (chan->state == BT_CONNECTED ||
7508 				chan->state == BT_CONFIG)) {
7509 			chan->ops->resume(chan);
7510 			l2cap_check_encryption(chan, encrypt);
7511 			l2cap_chan_unlock(chan);
7512 			continue;
7513 		}
7514 
7515 		if (chan->state == BT_CONNECT) {
7516 			if (!status && l2cap_check_enc_key_size(hcon))
7517 				l2cap_start_connection(chan);
7518 			else
7519 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7520 		} else if (chan->state == BT_CONNECT2 &&
7521 			   chan->mode != L2CAP_MODE_LE_FLOWCTL) {
7522 			struct l2cap_conn_rsp rsp;
7523 			__u16 res, stat;
7524 
7525 			if (!status && l2cap_check_enc_key_size(hcon)) {
7526 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
7527 					res = L2CAP_CR_PEND;
7528 					stat = L2CAP_CS_AUTHOR_PEND;
7529 					chan->ops->defer(chan);
7530 				} else {
7531 					l2cap_state_change(chan, BT_CONFIG);
7532 					res = L2CAP_CR_SUCCESS;
7533 					stat = L2CAP_CS_NO_INFO;
7534 				}
7535 			} else {
7536 				l2cap_state_change(chan, BT_DISCONN);
7537 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7538 				res = L2CAP_CR_SEC_BLOCK;
7539 				stat = L2CAP_CS_NO_INFO;
7540 			}
7541 
7542 			rsp.scid   = cpu_to_le16(chan->dcid);
7543 			rsp.dcid   = cpu_to_le16(chan->scid);
7544 			rsp.result = cpu_to_le16(res);
7545 			rsp.status = cpu_to_le16(stat);
7546 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
7547 				       sizeof(rsp), &rsp);
7548 
7549 			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7550 			    res == L2CAP_CR_SUCCESS) {
7551 				char buf[128];
7552 				set_bit(CONF_REQ_SENT, &chan->conf_state);
7553 				l2cap_send_cmd(conn, l2cap_get_ident(conn),
7554 					       L2CAP_CONF_REQ,
7555 					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
7556 					       buf);
7557 				chan->num_conf_req++;
7558 			}
7559 		}
7560 
7561 		l2cap_chan_unlock(chan);
7562 	}
7563 
7564 	mutex_unlock(&conn->chan_lock);
7565 }
7566 
7567 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7568 {
7569 	struct l2cap_conn *conn = hcon->l2cap_data;
7570 	struct l2cap_hdr *hdr;
7571 	int len;
7572 
7573 	/* For an AMP controller, do not create an l2cap conn */
7574 	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
7575 		goto drop;
7576 
7577 	if (!conn)
7578 		conn = l2cap_conn_add(hcon);
7579 
7580 	if (!conn)
7581 		goto drop;
7582 
7583 	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7584 
7585 	switch (flags) {
7586 	case ACL_START:
7587 	case ACL_START_NO_FLUSH:
7588 	case ACL_COMPLETE:
7589 		if (conn->rx_len) {
7590 			BT_ERR("Unexpected start frame (len %d)", skb->len);
7591 			kfree_skb(conn->rx_skb);
7592 			conn->rx_skb = NULL;
7593 			conn->rx_len = 0;
7594 			l2cap_conn_unreliable(conn, ECOMM);
7595 		}
7596 
7597 		/* A start fragment always begins with the Basic L2CAP header */
7598 		if (skb->len < L2CAP_HDR_SIZE) {
7599 			BT_ERR("Frame is too short (len %d)", skb->len);
7600 			l2cap_conn_unreliable(conn, ECOMM);
7601 			goto drop;
7602 		}
7603 
7604 		hdr = (struct l2cap_hdr *) skb->data;
7605 		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7606 
7607 		if (len == skb->len) {
7608 			/* Complete frame received */
7609 			l2cap_recv_frame(conn, skb);
7610 			return;
7611 		}
7612 
7613 		BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7614 
7615 		if (skb->len > len) {
7616 			BT_ERR("Frame is too long (len %d, expected len %d)",
7617 			       skb->len, len);
7618 			l2cap_conn_unreliable(conn, ECOMM);
7619 			goto drop;
7620 		}
7621 
7622 		/* Allocate skb for the complete frame (with header) */
7623 		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7624 		if (!conn->rx_skb)
7625 			goto drop;
7626 
7627 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7628 					  skb->len);
7629 		conn->rx_len = len - skb->len;
7630 		break;
7631 
7632 	case ACL_CONT:
7633 		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7634 
7635 		if (!conn->rx_len) {
7636 			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7637 			l2cap_conn_unreliable(conn, ECOMM);
7638 			goto drop;
7639 		}
7640 
7641 		if (skb->len > conn->rx_len) {
7642 			BT_ERR("Fragment is too long (len %d, expected %d)",
7643 			       skb->len, conn->rx_len);
7644 			kfree_skb(conn->rx_skb);
7645 			conn->rx_skb = NULL;
7646 			conn->rx_len = 0;
7647 			l2cap_conn_unreliable(conn, ECOMM);
7648 			goto drop;
7649 		}
7650 
7651 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7652 					  skb->len);
7653 		conn->rx_len -= skb->len;
7654 
7655 		if (!conn->rx_len) {
7656 			/* Complete frame received. l2cap_recv_frame
7657 			 * takes ownership of the skb, so clear the
7658 			 * connection's rx_skb pointer first.
7659 			 */
7660 			struct sk_buff *rx_skb = conn->rx_skb;
7661 			conn->rx_skb = NULL;
7662 			l2cap_recv_frame(conn, rx_skb);
7663 		}
7664 		break;
7665 	}
7666 
7667 drop:
7668 	kfree_skb(skb);
7669 }
7670 
7671 static struct hci_cb l2cap_cb = {
7672 	.name		= "L2CAP",
7673 	.connect_cfm	= l2cap_connect_cfm,
7674 	.disconn_cfm	= l2cap_disconn_cfm,
7675 	.security_cfm	= l2cap_security_cfm,
7676 };
7677 
7678 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7679 {
7680 	struct l2cap_chan *c;
7681 
7682 	read_lock(&chan_list_lock);
7683 
7684 	list_for_each_entry(c, &chan_list, global_l) {
7685 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7686 			   &c->src, c->src_type, &c->dst, c->dst_type,
7687 			   c->state, __le16_to_cpu(c->psm),
7688 			   c->scid, c->dcid, c->imtu, c->omtu,
7689 			   c->sec_level, c->mode);
7690 	}
7691 
7692 	read_unlock(&chan_list_lock);
7693 
7694 	return 0;
7695 }
7696 
7697 DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);
7698 
7699 static struct dentry *l2cap_debugfs;
7700 
7701 int __init l2cap_init(void)
7702 {
7703 	int err;
7704 
7705 	err = l2cap_init_sockets();
7706 	if (err < 0)
7707 		return err;
7708 
7709 	hci_register_cb(&l2cap_cb);
7710 
7711 	if (IS_ERR_OR_NULL(bt_debugfs))
7712 		return 0;
7713 
7714 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7715 					    NULL, &l2cap_debugfs_fops);
7716 
7717 	return 0;
7718 }
7719 
7720 void l2cap_exit(void)
7721 {
7722 	debugfs_remove(l2cap_debugfs);
7723 	hci_unregister_cb(&l2cap_cb);
7724 	l2cap_cleanup_sockets();
7725 }
7726 
7727 module_param(disable_ertm, bool, 0644);
7728 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
7729