xref: /linux/net/bluetooth/l2cap_core.c (revision cd1199edc719f4a918a19bd2c6b8f79329837561)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
41 #include <net/bluetooth/amp.h>
42 
43 bool disable_ertm;
44 
45 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
46 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
47 
48 static LIST_HEAD(chan_list);
49 static DEFINE_RWLOCK(chan_list_lock);
50 
51 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
52 				       u8 code, u8 ident, u16 dlen, void *data);
53 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
54 			   void *data);
55 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
56 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
57 
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 		     struct sk_buff_head *skbs, u8 event);
60 
61 /* ---- L2CAP channels ---- */
62 
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
64 						   u16 cid)
65 {
66 	struct l2cap_chan *c;
67 
68 	list_for_each_entry(c, &conn->chan_l, list) {
69 		if (c->dcid == cid)
70 			return c;
71 	}
72 	return NULL;
73 }
74 
75 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
76 						   u16 cid)
77 {
78 	struct l2cap_chan *c;
79 
80 	list_for_each_entry(c, &conn->chan_l, list) {
81 		if (c->scid == cid)
82 			return c;
83 	}
84 	return NULL;
85 }
86 
87 /* Find channel with given SCID.
88  * Returns locked channel. */
89 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
90 						 u16 cid)
91 {
92 	struct l2cap_chan *c;
93 
94 	mutex_lock(&conn->chan_lock);
95 	c = __l2cap_get_chan_by_scid(conn, cid);
96 	if (c)
97 		l2cap_chan_lock(c);
98 	mutex_unlock(&conn->chan_lock);
99 
100 	return c;
101 }
102 
103 /* Find channel with given DCID.
104  * Returns locked channel.
105  */
106 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
107 						 u16 cid)
108 {
109 	struct l2cap_chan *c;
110 
111 	mutex_lock(&conn->chan_lock);
112 	c = __l2cap_get_chan_by_dcid(conn, cid);
113 	if (c)
114 		l2cap_chan_lock(c);
115 	mutex_unlock(&conn->chan_lock);
116 
117 	return c;
118 }
119 
120 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
121 						    u8 ident)
122 {
123 	struct l2cap_chan *c;
124 
125 	list_for_each_entry(c, &conn->chan_l, list) {
126 		if (c->ident == ident)
127 			return c;
128 	}
129 	return NULL;
130 }
131 
132 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
133 						  u8 ident)
134 {
135 	struct l2cap_chan *c;
136 
137 	mutex_lock(&conn->chan_lock);
138 	c = __l2cap_get_chan_by_ident(conn, ident);
139 	if (c)
140 		l2cap_chan_lock(c);
141 	mutex_unlock(&conn->chan_lock);
142 
143 	return c;
144 }
145 
146 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
147 {
148 	struct l2cap_chan *c;
149 
150 	list_for_each_entry(c, &chan_list, global_l) {
151 		if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
152 			return c;
153 	}
154 	return NULL;
155 }
156 
157 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
158 {
159 	int err;
160 
161 	write_lock(&chan_list_lock);
162 
163 	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
164 		err = -EADDRINUSE;
165 		goto done;
166 	}
167 
168 	if (psm) {
169 		chan->psm = psm;
170 		chan->sport = psm;
171 		err = 0;
172 	} else {
173 		u16 p;
174 
175 		err = -EINVAL;
176 		for (p = 0x1001; p < 0x1100; p += 2)
177 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
178 				chan->psm   = cpu_to_le16(p);
179 				chan->sport = cpu_to_le16(p);
180 				err = 0;
181 				break;
182 			}
183 	}
184 
185 done:
186 	write_unlock(&chan_list_lock);
187 	return err;
188 }
189 
190 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
191 {
192 	write_lock(&chan_list_lock);
193 
194 	chan->scid = scid;
195 
196 	write_unlock(&chan_list_lock);
197 
198 	return 0;
199 }
200 
201 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
202 {
203 	u16 cid = L2CAP_CID_DYN_START;
204 
205 	for (; cid < L2CAP_CID_DYN_END; cid++) {
206 		if (!__l2cap_get_chan_by_scid(conn, cid))
207 			return cid;
208 	}
209 
210 	return 0;
211 }
212 
213 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
214 {
215 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
216 	       state_to_string(state));
217 
218 	chan->state = state;
219 	chan->ops->state_change(chan, state);
220 }
221 
222 static void l2cap_state_change(struct l2cap_chan *chan, int state)
223 {
224 	struct sock *sk = chan->sk;
225 
226 	lock_sock(sk);
227 	__l2cap_state_change(chan, state);
228 	release_sock(sk);
229 }
230 
231 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
232 {
233 	struct sock *sk = chan->sk;
234 
235 	sk->sk_err = err;
236 }
237 
238 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
239 {
240 	struct sock *sk = chan->sk;
241 
242 	lock_sock(sk);
243 	__l2cap_chan_set_err(chan, err);
244 	release_sock(sk);
245 }
246 
247 static void __set_retrans_timer(struct l2cap_chan *chan)
248 {
249 	if (!delayed_work_pending(&chan->monitor_timer) &&
250 	    chan->retrans_timeout) {
251 		l2cap_set_timer(chan, &chan->retrans_timer,
252 				msecs_to_jiffies(chan->retrans_timeout));
253 	}
254 }
255 
256 static void __set_monitor_timer(struct l2cap_chan *chan)
257 {
258 	__clear_retrans_timer(chan);
259 	if (chan->monitor_timeout) {
260 		l2cap_set_timer(chan, &chan->monitor_timer,
261 				msecs_to_jiffies(chan->monitor_timeout));
262 	}
263 }
264 
265 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
266 					       u16 seq)
267 {
268 	struct sk_buff *skb;
269 
270 	skb_queue_walk(head, skb) {
271 		if (bt_cb(skb)->control.txseq == seq)
272 			return skb;
273 	}
274 
275 	return NULL;
276 }
277 
278 /* ---- L2CAP sequence number lists ---- */
279 
280 /* For ERTM, ordered lists of sequence numbers must be tracked for
281  * SREJ requests that are received and for frames that are to be
282  * retransmitted. These seq_list functions implement a singly-linked
283  * list in an array, where membership in the list can also be checked
284  * in constant time. Items can also be added to the tail of the list
285  * and removed from the head in constant time, without further memory
286  * allocs or frees.
287  */
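/* Worked example (illustrative, with assumed values): for a negotiated
 * transmit window of 10 frames the array is rounded up to 16 entries,
 * so mask == 0x0f.  Appending 5, 10 and 3 leaves head == 5, tail == 3,
 * list[5] == 10, list[10] == 3 and list[3] == L2CAP_SEQ_LIST_TAIL.
 * l2cap_seq_list_pop() then returns 5, 10 and 3 in FIFO order, while
 * l2cap_seq_list_contains() remains a single array lookup for any of
 * them.
 */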
288 
289 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
290 {
291 	size_t alloc_size, i;
292 
293 	/* Allocated size is a power of 2 to map sequence numbers
294 	 * (which may be up to 14 bits) into a smaller array that is
295 	 * sized for the negotiated ERTM transmit windows.
296 	 */
297 	alloc_size = roundup_pow_of_two(size);
298 
299 	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
300 	if (!seq_list->list)
301 		return -ENOMEM;
302 
303 	seq_list->mask = alloc_size - 1;
304 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
305 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 	for (i = 0; i < alloc_size; i++)
307 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
308 
309 	return 0;
310 }
311 
312 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
313 {
314 	kfree(seq_list->list);
315 }
316 
317 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
318 					   u16 seq)
319 {
320 	/* Constant-time check for list membership */
321 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
322 }
323 
324 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
325 {
326 	u16 mask = seq_list->mask;
327 
328 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
329 		/* In case someone tries to pop the head of an empty list */
330 		return L2CAP_SEQ_LIST_CLEAR;
331 	} else if (seq_list->head == seq) {
332 		/* Head can be removed in constant time */
333 		seq_list->head = seq_list->list[seq & mask];
334 		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
335 
336 		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
337 			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
338 			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
339 		}
340 	} else {
341 		/* Walk the list to find the sequence number */
342 		u16 prev = seq_list->head;
343 		while (seq_list->list[prev & mask] != seq) {
344 			prev = seq_list->list[prev & mask];
345 			if (prev == L2CAP_SEQ_LIST_TAIL)
346 				return L2CAP_SEQ_LIST_CLEAR;
347 		}
348 
349 		/* Unlink the number from the list and clear it */
350 		seq_list->list[prev & mask] = seq_list->list[seq & mask];
351 		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
352 		if (seq_list->tail == seq)
353 			seq_list->tail = prev;
354 	}
355 	return seq;
356 }
357 
358 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
359 {
360 	/* Remove the head in constant time */
361 	return l2cap_seq_list_remove(seq_list, seq_list->head);
362 }
363 
364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
365 {
366 	u16 i;
367 
368 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
369 		return;
370 
371 	for (i = 0; i <= seq_list->mask; i++)
372 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
373 
374 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
376 }
377 
378 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
379 {
380 	u16 mask = seq_list->mask;
381 
382 	/* All appends happen in constant time */
383 
384 	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
385 		return;
386 
387 	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
388 		seq_list->head = seq;
389 	else
390 		seq_list->list[seq_list->tail & mask] = seq;
391 
392 	seq_list->tail = seq;
393 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
394 }
395 
396 static void l2cap_chan_timeout(struct work_struct *work)
397 {
398 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
399 					       chan_timer.work);
400 	struct l2cap_conn *conn = chan->conn;
401 	int reason;
402 
403 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
404 
405 	mutex_lock(&conn->chan_lock);
406 	l2cap_chan_lock(chan);
407 
408 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
409 		reason = ECONNREFUSED;
410 	else if (chan->state == BT_CONNECT &&
411 		 chan->sec_level != BT_SECURITY_SDP)
412 		reason = ECONNREFUSED;
413 	else
414 		reason = ETIMEDOUT;
415 
416 	l2cap_chan_close(chan, reason);
417 
418 	l2cap_chan_unlock(chan);
419 
420 	chan->ops->close(chan);
421 	mutex_unlock(&conn->chan_lock);
422 
423 	l2cap_chan_put(chan);
424 }
425 
426 struct l2cap_chan *l2cap_chan_create(void)
427 {
428 	struct l2cap_chan *chan;
429 
430 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
431 	if (!chan)
432 		return NULL;
433 
434 	mutex_init(&chan->lock);
435 
436 	write_lock(&chan_list_lock);
437 	list_add(&chan->global_l, &chan_list);
438 	write_unlock(&chan_list_lock);
439 
440 	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
441 
442 	chan->state = BT_OPEN;
443 
444 	kref_init(&chan->kref);
445 
446 	/* This flag is cleared in l2cap_chan_ready() */
447 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
448 
449 	BT_DBG("chan %p", chan);
450 
451 	return chan;
452 }
453 
454 static void l2cap_chan_destroy(struct kref *kref)
455 {
456 	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
457 
458 	BT_DBG("chan %p", chan);
459 
460 	write_lock(&chan_list_lock);
461 	list_del(&chan->global_l);
462 	write_unlock(&chan_list_lock);
463 
464 	kfree(chan);
465 }
466 
467 void l2cap_chan_hold(struct l2cap_chan *c)
468 {
469 	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
470 
471 	kref_get(&c->kref);
472 }
473 
474 void l2cap_chan_put(struct l2cap_chan *c)
475 {
476 	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
477 
478 	kref_put(&c->kref, l2cap_chan_destroy);
479 }
480 
481 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
482 {
483 	chan->fcs  = L2CAP_FCS_CRC16;
484 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
485 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
486 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
487 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
488 	chan->sec_level = BT_SECURITY_LOW;
489 
490 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
491 }
492 
493 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
494 {
495 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
496 	       __le16_to_cpu(chan->psm), chan->dcid);
497 
498 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
499 
500 	chan->conn = conn;
501 
502 	switch (chan->chan_type) {
503 	case L2CAP_CHAN_CONN_ORIENTED:
504 		if (conn->hcon->type == LE_LINK) {
505 			/* LE connection */
506 			chan->omtu = L2CAP_DEFAULT_MTU;
507 			chan->scid = L2CAP_CID_LE_DATA;
508 			chan->dcid = L2CAP_CID_LE_DATA;
509 		} else {
510 			/* Alloc CID for connection-oriented socket */
511 			chan->scid = l2cap_alloc_cid(conn);
512 			chan->omtu = L2CAP_DEFAULT_MTU;
513 		}
514 		break;
515 
516 	case L2CAP_CHAN_CONN_LESS:
517 		/* Connectionless socket */
518 		chan->scid = L2CAP_CID_CONN_LESS;
519 		chan->dcid = L2CAP_CID_CONN_LESS;
520 		chan->omtu = L2CAP_DEFAULT_MTU;
521 		break;
522 
523 	case L2CAP_CHAN_CONN_FIX_A2MP:
524 		chan->scid = L2CAP_CID_A2MP;
525 		chan->dcid = L2CAP_CID_A2MP;
526 		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
527 		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
528 		break;
529 
530 	default:
531 		/* Raw socket can send/recv signalling messages only */
532 		chan->scid = L2CAP_CID_SIGNALING;
533 		chan->dcid = L2CAP_CID_SIGNALING;
534 		chan->omtu = L2CAP_DEFAULT_MTU;
535 	}
536 
537 	chan->local_id		= L2CAP_BESTEFFORT_ID;
538 	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
539 	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
540 	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
541 	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
542 	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;
543 
544 	l2cap_chan_hold(chan);
545 
546 	list_add(&chan->list, &conn->chan_l);
547 }
548 
549 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
550 {
551 	mutex_lock(&conn->chan_lock);
552 	__l2cap_chan_add(conn, chan);
553 	mutex_unlock(&conn->chan_lock);
554 }
555 
556 void l2cap_chan_del(struct l2cap_chan *chan, int err)
557 {
558 	struct l2cap_conn *conn = chan->conn;
559 
560 	__clear_chan_timer(chan);
561 
562 	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
563 
564 	if (conn) {
565 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
566 		/* Delete from channel list */
567 		list_del(&chan->list);
568 
569 		l2cap_chan_put(chan);
570 
571 		chan->conn = NULL;
572 
573 		if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
574 			hci_conn_drop(conn->hcon);
575 
576 		if (mgr && mgr->bredr_chan == chan)
577 			mgr->bredr_chan = NULL;
578 	}
579 
580 	if (chan->hs_hchan) {
581 		struct hci_chan *hs_hchan = chan->hs_hchan;
582 
583 		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
584 		amp_disconnect_logical_link(hs_hchan);
585 	}
586 
587 	chan->ops->teardown(chan, err);
588 
589 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
590 		return;
591 
592 	switch (chan->mode) {
593 	case L2CAP_MODE_BASIC:
594 		break;
595 
596 	case L2CAP_MODE_ERTM:
597 		__clear_retrans_timer(chan);
598 		__clear_monitor_timer(chan);
599 		__clear_ack_timer(chan);
600 
601 		skb_queue_purge(&chan->srej_q);
602 
603 		l2cap_seq_list_free(&chan->srej_list);
604 		l2cap_seq_list_free(&chan->retrans_list);
605 
606 		/* fall through */
607 
608 	case L2CAP_MODE_STREAMING:
609 		skb_queue_purge(&chan->tx_q);
610 		break;
611 	}
612 
613 	return;
614 }
615 
616 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
617 {
618 	struct l2cap_conn *conn = chan->conn;
619 	struct sock *sk = chan->sk;
620 
621 	BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
622 	       sk);
623 
624 	switch (chan->state) {
625 	case BT_LISTEN:
626 		chan->ops->teardown(chan, 0);
627 		break;
628 
629 	case BT_CONNECTED:
630 	case BT_CONFIG:
631 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
632 		    conn->hcon->type == ACL_LINK) {
633 			__set_chan_timer(chan, sk->sk_sndtimeo);
634 			l2cap_send_disconn_req(chan, reason);
635 		} else
636 			l2cap_chan_del(chan, reason);
637 		break;
638 
639 	case BT_CONNECT2:
640 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
641 		    conn->hcon->type == ACL_LINK) {
642 			struct l2cap_conn_rsp rsp;
643 			__u16 result;
644 
645 			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
646 				result = L2CAP_CR_SEC_BLOCK;
647 			else
648 				result = L2CAP_CR_BAD_PSM;
649 			l2cap_state_change(chan, BT_DISCONN);
650 
651 			rsp.scid   = cpu_to_le16(chan->dcid);
652 			rsp.dcid   = cpu_to_le16(chan->scid);
653 			rsp.result = cpu_to_le16(result);
654 			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
655 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
656 				       sizeof(rsp), &rsp);
657 		}
658 
659 		l2cap_chan_del(chan, reason);
660 		break;
661 
662 	case BT_CONNECT:
663 	case BT_DISCONN:
664 		l2cap_chan_del(chan, reason);
665 		break;
666 
667 	default:
668 		chan->ops->teardown(chan, 0);
669 		break;
670 	}
671 }
672 
673 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
674 {
675 	if (chan->chan_type == L2CAP_CHAN_RAW) {
676 		switch (chan->sec_level) {
677 		case BT_SECURITY_HIGH:
678 			return HCI_AT_DEDICATED_BONDING_MITM;
679 		case BT_SECURITY_MEDIUM:
680 			return HCI_AT_DEDICATED_BONDING;
681 		default:
682 			return HCI_AT_NO_BONDING;
683 		}
684 	} else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
685 		if (chan->sec_level == BT_SECURITY_LOW)
686 			chan->sec_level = BT_SECURITY_SDP;
687 
688 		if (chan->sec_level == BT_SECURITY_HIGH)
689 			return HCI_AT_NO_BONDING_MITM;
690 		else
691 			return HCI_AT_NO_BONDING;
692 	} else {
693 		switch (chan->sec_level) {
694 		case BT_SECURITY_HIGH:
695 			return HCI_AT_GENERAL_BONDING_MITM;
696 		case BT_SECURITY_MEDIUM:
697 			return HCI_AT_GENERAL_BONDING;
698 		default:
699 			return HCI_AT_NO_BONDING;
700 		}
701 	}
702 }
703 
704 /* Service level security */
705 int l2cap_chan_check_security(struct l2cap_chan *chan)
706 {
707 	struct l2cap_conn *conn = chan->conn;
708 	__u8 auth_type;
709 
710 	auth_type = l2cap_get_auth_type(chan);
711 
712 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
713 }
714 
715 static u8 l2cap_get_ident(struct l2cap_conn *conn)
716 {
717 	u8 id;
718 
719 	/* Get next available identifier.
720 	 *    1 - 128 are used by the kernel.
721 	 *  129 - 199 are reserved.
722 	 *  200 - 254 are used by utilities like l2ping, etc.
723 	 */
724 
725 	spin_lock(&conn->lock);
726 
727 	if (++conn->tx_ident > 128)
728 		conn->tx_ident = 1;
729 
730 	id = conn->tx_ident;
731 
732 	spin_unlock(&conn->lock);
733 
734 	return id;
735 }
736 
737 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
738 			   void *data)
739 {
740 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
741 	u8 flags;
742 
743 	BT_DBG("code 0x%2.2x", code);
744 
745 	if (!skb)
746 		return;
747 
748 	if (lmp_no_flush_capable(conn->hcon->hdev))
749 		flags = ACL_START_NO_FLUSH;
750 	else
751 		flags = ACL_START;
752 
753 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
754 	skb->priority = HCI_PRIO_MAX;
755 
756 	hci_send_acl(conn->hchan, skb, flags);
757 }
758 
759 static bool __chan_is_moving(struct l2cap_chan *chan)
760 {
761 	return chan->move_state != L2CAP_MOVE_STABLE &&
762 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
763 }
764 
765 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
766 {
767 	struct hci_conn *hcon = chan->conn->hcon;
768 	u16 flags;
769 
770 	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
771 	       skb->priority);
772 
773 	if (chan->hs_hcon && !__chan_is_moving(chan)) {
774 		if (chan->hs_hchan)
775 			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
776 		else
777 			kfree_skb(skb);
778 
779 		return;
780 	}
781 
782 	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
783 	    lmp_no_flush_capable(hcon->hdev))
784 		flags = ACL_START_NO_FLUSH;
785 	else
786 		flags = ACL_START;
787 
788 	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
789 	hci_send_acl(chan->conn->hchan, skb, flags);
790 }
791 
792 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
793 {
794 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
795 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
796 
797 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
798 		/* S-Frame */
799 		control->sframe = 1;
800 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
801 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
802 
803 		control->sar = 0;
804 		control->txseq = 0;
805 	} else {
806 		/* I-Frame */
807 		control->sframe = 0;
808 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
809 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
810 
811 		control->poll = 0;
812 		control->super = 0;
813 	}
814 }
815 
816 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
817 {
818 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
819 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
820 
821 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
822 		/* S-Frame */
823 		control->sframe = 1;
824 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
825 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
826 
827 		control->sar = 0;
828 		control->txseq = 0;
829 	} else {
830 		/* I-Frame */
831 		control->sframe = 0;
832 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
833 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
834 
835 		control->poll = 0;
836 		control->super = 0;
837 	}
838 }
839 
840 static inline void __unpack_control(struct l2cap_chan *chan,
841 				    struct sk_buff *skb)
842 {
843 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
844 		__unpack_extended_control(get_unaligned_le32(skb->data),
845 					  &bt_cb(skb)->control);
846 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
847 	} else {
848 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
849 					  &bt_cb(skb)->control);
850 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
851 	}
852 }
853 
854 static u32 __pack_extended_control(struct l2cap_ctrl *control)
855 {
856 	u32 packed;
857 
858 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
859 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
860 
861 	if (control->sframe) {
862 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
863 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
864 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
865 	} else {
866 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
867 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
868 	}
869 
870 	return packed;
871 }
872 
873 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
874 {
875 	u16 packed;
876 
877 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
878 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
879 
880 	if (control->sframe) {
881 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
882 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
883 		packed |= L2CAP_CTRL_FRAME_TYPE;
884 	} else {
885 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
886 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
887 	}
888 
889 	return packed;
890 }
891 
892 static inline void __pack_control(struct l2cap_chan *chan,
893 				  struct l2cap_ctrl *control,
894 				  struct sk_buff *skb)
895 {
896 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
897 		put_unaligned_le32(__pack_extended_control(control),
898 				   skb->data + L2CAP_HDR_SIZE);
899 	} else {
900 		put_unaligned_le16(__pack_enhanced_control(control),
901 				   skb->data + L2CAP_HDR_SIZE);
902 	}
903 }
904 
905 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
906 {
907 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
908 		return L2CAP_EXT_HDR_SIZE;
909 	else
910 		return L2CAP_ENH_HDR_SIZE;
911 }
912 
913 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
914 					       u32 control)
915 {
916 	struct sk_buff *skb;
917 	struct l2cap_hdr *lh;
918 	int hlen = __ertm_hdr_size(chan);
919 
920 	if (chan->fcs == L2CAP_FCS_CRC16)
921 		hlen += L2CAP_FCS_SIZE;
922 
923 	skb = bt_skb_alloc(hlen, GFP_KERNEL);
924 
925 	if (!skb)
926 		return ERR_PTR(-ENOMEM);
927 
928 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
929 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
930 	lh->cid = cpu_to_le16(chan->dcid);
931 
932 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
933 		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
934 	else
935 		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
936 
937 	if (chan->fcs == L2CAP_FCS_CRC16) {
938 		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
939 		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
940 	}
941 
942 	skb->priority = HCI_PRIO_MAX;
943 	return skb;
944 }
945 
946 static void l2cap_send_sframe(struct l2cap_chan *chan,
947 			      struct l2cap_ctrl *control)
948 {
949 	struct sk_buff *skb;
950 	u32 control_field;
951 
952 	BT_DBG("chan %p, control %p", chan, control);
953 
954 	if (!control->sframe)
955 		return;
956 
957 	if (__chan_is_moving(chan))
958 		return;
959 
960 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
961 	    !control->poll)
962 		control->final = 1;
963 
964 	if (control->super == L2CAP_SUPER_RR)
965 		clear_bit(CONN_RNR_SENT, &chan->conn_state);
966 	else if (control->super == L2CAP_SUPER_RNR)
967 		set_bit(CONN_RNR_SENT, &chan->conn_state);
968 
969 	if (control->super != L2CAP_SUPER_SREJ) {
970 		chan->last_acked_seq = control->reqseq;
971 		__clear_ack_timer(chan);
972 	}
973 
974 	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
975 	       control->final, control->poll, control->super);
976 
977 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
978 		control_field = __pack_extended_control(control);
979 	else
980 		control_field = __pack_enhanced_control(control);
981 
982 	skb = l2cap_create_sframe_pdu(chan, control_field);
983 	if (!IS_ERR(skb))
984 		l2cap_do_send(chan, skb);
985 }
986 
987 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
988 {
989 	struct l2cap_ctrl control;
990 
991 	BT_DBG("chan %p, poll %d", chan, poll);
992 
993 	memset(&control, 0, sizeof(control));
994 	control.sframe = 1;
995 	control.poll = poll;
996 
997 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
998 		control.super = L2CAP_SUPER_RNR;
999 	else
1000 		control.super = L2CAP_SUPER_RR;
1001 
1002 	control.reqseq = chan->buffer_seq;
1003 	l2cap_send_sframe(chan, &control);
1004 }
1005 
1006 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1007 {
1008 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1009 }
1010 
1011 static bool __amp_capable(struct l2cap_chan *chan)
1012 {
1013 	struct l2cap_conn *conn = chan->conn;
1014 
1015 	if (enable_hs &&
1016 	    hci_amp_capable() &&
1017 	    chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED &&
1018 	    conn->fixed_chan_mask & L2CAP_FC_A2MP)
1019 		return true;
1020 	else
1021 		return false;
1022 }
1023 
1024 static bool l2cap_check_efs(struct l2cap_chan *chan)
1025 {
1026 	/* Check EFS parameters */
1027 	return true;
1028 }
1029 
1030 void l2cap_send_conn_req(struct l2cap_chan *chan)
1031 {
1032 	struct l2cap_conn *conn = chan->conn;
1033 	struct l2cap_conn_req req;
1034 
1035 	req.scid = cpu_to_le16(chan->scid);
1036 	req.psm  = chan->psm;
1037 
1038 	chan->ident = l2cap_get_ident(conn);
1039 
1040 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1041 
1042 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1043 }
1044 
1045 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1046 {
1047 	struct l2cap_create_chan_req req;
1048 	req.scid = cpu_to_le16(chan->scid);
1049 	req.psm  = chan->psm;
1050 	req.amp_id = amp_id;
1051 
1052 	chan->ident = l2cap_get_ident(chan->conn);
1053 
1054 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1055 		       sizeof(req), &req);
1056 }
1057 
1058 static void l2cap_move_setup(struct l2cap_chan *chan)
1059 {
1060 	struct sk_buff *skb;
1061 
1062 	BT_DBG("chan %p", chan);
1063 
1064 	if (chan->mode != L2CAP_MODE_ERTM)
1065 		return;
1066 
1067 	__clear_retrans_timer(chan);
1068 	__clear_monitor_timer(chan);
1069 	__clear_ack_timer(chan);
1070 
1071 	chan->retry_count = 0;
1072 	skb_queue_walk(&chan->tx_q, skb) {
1073 		if (bt_cb(skb)->control.retries)
1074 			bt_cb(skb)->control.retries = 1;
1075 		else
1076 			break;
1077 	}
1078 
1079 	chan->expected_tx_seq = chan->buffer_seq;
1080 
1081 	clear_bit(CONN_REJ_ACT, &chan->conn_state);
1082 	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1083 	l2cap_seq_list_clear(&chan->retrans_list);
1084 	l2cap_seq_list_clear(&chan->srej_list);
1085 	skb_queue_purge(&chan->srej_q);
1086 
1087 	chan->tx_state = L2CAP_TX_STATE_XMIT;
1088 	chan->rx_state = L2CAP_RX_STATE_MOVE;
1089 
1090 	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1091 }
1092 
1093 static void l2cap_move_done(struct l2cap_chan *chan)
1094 {
1095 	u8 move_role = chan->move_role;
1096 	BT_DBG("chan %p", chan);
1097 
1098 	chan->move_state = L2CAP_MOVE_STABLE;
1099 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1100 
1101 	if (chan->mode != L2CAP_MODE_ERTM)
1102 		return;
1103 
1104 	switch (move_role) {
1105 	case L2CAP_MOVE_ROLE_INITIATOR:
1106 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1107 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1108 		break;
1109 	case L2CAP_MOVE_ROLE_RESPONDER:
1110 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1111 		break;
1112 	}
1113 }
1114 
1115 static void l2cap_chan_ready(struct l2cap_chan *chan)
1116 {
1117 	/* This clears all conf flags, including CONF_NOT_COMPLETE */
1118 	chan->conf_state = 0;
1119 	__clear_chan_timer(chan);
1120 
1121 	chan->state = BT_CONNECTED;
1122 
1123 	chan->ops->ready(chan);
1124 }
1125 
1126 static void l2cap_start_connection(struct l2cap_chan *chan)
1127 {
1128 	if (__amp_capable(chan)) {
1129 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1130 		a2mp_discover_amp(chan);
1131 	} else {
1132 		l2cap_send_conn_req(chan);
1133 	}
1134 }
1135 
1136 static void l2cap_do_start(struct l2cap_chan *chan)
1137 {
1138 	struct l2cap_conn *conn = chan->conn;
1139 
1140 	if (conn->hcon->type == LE_LINK) {
1141 		l2cap_chan_ready(chan);
1142 		return;
1143 	}
1144 
1145 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1146 		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1147 			return;
1148 
1149 		if (l2cap_chan_check_security(chan) &&
1150 		    __l2cap_no_conn_pending(chan)) {
1151 			l2cap_start_connection(chan);
1152 		}
1153 	} else {
1154 		struct l2cap_info_req req;
1155 		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1156 
1157 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1158 		conn->info_ident = l2cap_get_ident(conn);
1159 
1160 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1161 
1162 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1163 			       sizeof(req), &req);
1164 	}
1165 }
1166 
1167 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1168 {
1169 	u32 local_feat_mask = l2cap_feat_mask;
1170 	if (!disable_ertm)
1171 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1172 
1173 	switch (mode) {
1174 	case L2CAP_MODE_ERTM:
1175 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1176 	case L2CAP_MODE_STREAMING:
1177 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1178 	default:
1179 		return 0x00;
1180 	}
1181 }
1182 
1183 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1184 {
1185 	struct sock *sk = chan->sk;
1186 	struct l2cap_conn *conn = chan->conn;
1187 	struct l2cap_disconn_req req;
1188 
1189 	if (!conn)
1190 		return;
1191 
1192 	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1193 		__clear_retrans_timer(chan);
1194 		__clear_monitor_timer(chan);
1195 		__clear_ack_timer(chan);
1196 	}
1197 
1198 	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1199 		l2cap_state_change(chan, BT_DISCONN);
1200 		return;
1201 	}
1202 
1203 	req.dcid = cpu_to_le16(chan->dcid);
1204 	req.scid = cpu_to_le16(chan->scid);
1205 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1206 		       sizeof(req), &req);
1207 
1208 	lock_sock(sk);
1209 	__l2cap_state_change(chan, BT_DISCONN);
1210 	__l2cap_chan_set_err(chan, err);
1211 	release_sock(sk);
1212 }
1213 
1214 /* ---- L2CAP connections ---- */
1215 static void l2cap_conn_start(struct l2cap_conn *conn)
1216 {
1217 	struct l2cap_chan *chan, *tmp;
1218 
1219 	BT_DBG("conn %p", conn);
1220 
1221 	mutex_lock(&conn->chan_lock);
1222 
1223 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1224 		struct sock *sk = chan->sk;
1225 
1226 		l2cap_chan_lock(chan);
1227 
1228 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1229 			l2cap_chan_unlock(chan);
1230 			continue;
1231 		}
1232 
1233 		if (chan->state == BT_CONNECT) {
1234 			if (!l2cap_chan_check_security(chan) ||
1235 			    !__l2cap_no_conn_pending(chan)) {
1236 				l2cap_chan_unlock(chan);
1237 				continue;
1238 			}
1239 
1240 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1241 			    && test_bit(CONF_STATE2_DEVICE,
1242 					&chan->conf_state)) {
1243 				l2cap_chan_close(chan, ECONNRESET);
1244 				l2cap_chan_unlock(chan);
1245 				continue;
1246 			}
1247 
1248 			l2cap_start_connection(chan);
1249 
1250 		} else if (chan->state == BT_CONNECT2) {
1251 			struct l2cap_conn_rsp rsp;
1252 			char buf[128];
1253 			rsp.scid = cpu_to_le16(chan->dcid);
1254 			rsp.dcid = cpu_to_le16(chan->scid);
1255 
1256 			if (l2cap_chan_check_security(chan)) {
1257 				lock_sock(sk);
1258 				if (test_bit(BT_SK_DEFER_SETUP,
1259 					     &bt_sk(sk)->flags)) {
1260 					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1261 					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1262 					chan->ops->defer(chan);
1263 
1264 				} else {
1265 					__l2cap_state_change(chan, BT_CONFIG);
1266 					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1267 					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1268 				}
1269 				release_sock(sk);
1270 			} else {
1271 				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1272 				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1273 			}
1274 
1275 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1276 				       sizeof(rsp), &rsp);
1277 
1278 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1279 			    rsp.result != L2CAP_CR_SUCCESS) {
1280 				l2cap_chan_unlock(chan);
1281 				continue;
1282 			}
1283 
1284 			set_bit(CONF_REQ_SENT, &chan->conf_state);
1285 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1286 				       l2cap_build_conf_req(chan, buf), buf);
1287 			chan->num_conf_req++;
1288 		}
1289 
1290 		l2cap_chan_unlock(chan);
1291 	}
1292 
1293 	mutex_unlock(&conn->chan_lock);
1294 }
1295 
1296 /* Find socket with cid and source/destination bdaddr.
1297  * Returns closest match.
1298  */
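/* For example (illustrative only): a channel whose source and destination
 * addresses both match exactly is returned immediately; otherwise a
 * channel bound to BDADDR_ANY on either address (or both) is remembered
 * and returned as the closest match once the whole list has been scanned.
 */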
1299 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1300 						    bdaddr_t *src,
1301 						    bdaddr_t *dst)
1302 {
1303 	struct l2cap_chan *c, *c1 = NULL;
1304 
1305 	read_lock(&chan_list_lock);
1306 
1307 	list_for_each_entry(c, &chan_list, global_l) {
1308 		struct sock *sk = c->sk;
1309 
1310 		if (state && c->state != state)
1311 			continue;
1312 
1313 		if (c->scid == cid) {
1314 			int src_match, dst_match;
1315 			int src_any, dst_any;
1316 
1317 			/* Exact match. */
1318 			src_match = !bacmp(&bt_sk(sk)->src, src);
1319 			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1320 			if (src_match && dst_match) {
1321 				read_unlock(&chan_list_lock);
1322 				return c;
1323 			}
1324 
1325 			/* Closest match */
1326 			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1327 			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1328 			if ((src_match && dst_any) || (src_any && dst_match) ||
1329 			    (src_any && dst_any))
1330 				c1 = c;
1331 		}
1332 	}
1333 
1334 	read_unlock(&chan_list_lock);
1335 
1336 	return c1;
1337 }
1338 
1339 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1340 {
1341 	struct sock *parent, *sk;
1342 	struct l2cap_chan *chan, *pchan;
1343 
1344 	BT_DBG("");
1345 
1346 	/* Check if we have a socket listening on the CID */
1347 	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1348 					  conn->src, conn->dst);
1349 	if (!pchan)
1350 		return;
1351 
1352 	parent = pchan->sk;
1353 
1354 	lock_sock(parent);
1355 
1356 	chan = pchan->ops->new_connection(pchan);
1357 	if (!chan)
1358 		goto clean;
1359 
1360 	sk = chan->sk;
1361 
1362 	hci_conn_hold(conn->hcon);
1363 	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
1364 
1365 	bacpy(&bt_sk(sk)->src, conn->src);
1366 	bacpy(&bt_sk(sk)->dst, conn->dst);
1367 
1368 	l2cap_chan_add(conn, chan);
1369 
1370 	l2cap_chan_ready(chan);
1371 
1372 clean:
1373 	release_sock(parent);
1374 }
1375 
1376 static void l2cap_conn_ready(struct l2cap_conn *conn)
1377 {
1378 	struct l2cap_chan *chan;
1379 	struct hci_conn *hcon = conn->hcon;
1380 
1381 	BT_DBG("conn %p", conn);
1382 
1383 	if (!hcon->out && hcon->type == LE_LINK)
1384 		l2cap_le_conn_ready(conn);
1385 
1386 	if (hcon->out && hcon->type == LE_LINK)
1387 		smp_conn_security(hcon, hcon->pending_sec_level);
1388 
1389 	mutex_lock(&conn->chan_lock);
1390 
1391 	list_for_each_entry(chan, &conn->chan_l, list) {
1392 
1393 		l2cap_chan_lock(chan);
1394 
1395 		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1396 			l2cap_chan_unlock(chan);
1397 			continue;
1398 		}
1399 
1400 		if (hcon->type == LE_LINK) {
1401 			if (smp_conn_security(hcon, chan->sec_level))
1402 				l2cap_chan_ready(chan);
1403 
1404 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1405 			struct sock *sk = chan->sk;
1406 			__clear_chan_timer(chan);
1407 			lock_sock(sk);
1408 			__l2cap_state_change(chan, BT_CONNECTED);
1409 			sk->sk_state_change(sk);
1410 			release_sock(sk);
1411 
1412 		} else if (chan->state == BT_CONNECT)
1413 			l2cap_do_start(chan);
1414 
1415 		l2cap_chan_unlock(chan);
1416 	}
1417 
1418 	mutex_unlock(&conn->chan_lock);
1419 }
1420 
1421 /* Notify sockets that we cannot guarantee reliability anymore */
1422 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1423 {
1424 	struct l2cap_chan *chan;
1425 
1426 	BT_DBG("conn %p", conn);
1427 
1428 	mutex_lock(&conn->chan_lock);
1429 
1430 	list_for_each_entry(chan, &conn->chan_l, list) {
1431 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1432 			l2cap_chan_set_err(chan, err);
1433 	}
1434 
1435 	mutex_unlock(&conn->chan_lock);
1436 }
1437 
1438 static void l2cap_info_timeout(struct work_struct *work)
1439 {
1440 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1441 					       info_timer.work);
1442 
1443 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1444 	conn->info_ident = 0;
1445 
1446 	l2cap_conn_start(conn);
1447 }
1448 
1449 /*
1450  * l2cap_user
1451  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1452  * callback is called during registration. The ->remove callback is called
1453  * during unregistration.
1454  * An l2cap_user object can either be explicitly unregistered or when the
1455  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1456  * l2cap->hchan, etc. are valid as long as the remove callback hasn't been called.
1457  * External modules must own a reference to the l2cap_conn object if they intend
1458  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1459  * any time if they don't.
1460  */
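/* Minimal usage sketch (illustrative only; my_probe, my_remove and my_user
 * are made-up names, not part of this file).  An external module holds its
 * own reference on the connection around the registration:
 *
 *	static int my_probe(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		return 0;	// conn->hcon and conn->hchan are usable from here
 *	}
 *
 *	static void my_remove(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		// last chance to stop using conn; it may go away afterwards
 *	}
 *
 *	static struct l2cap_user my_user = {
 *		.probe  = my_probe,
 *		.remove = my_remove,
 *	};
 *
 *	l2cap_conn_get(conn);
 *	err = l2cap_register_user(conn, &my_user);
 *	...
 *	l2cap_unregister_user(conn, &my_user);
 *	l2cap_conn_put(conn);
 */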
1461 
1462 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1463 {
1464 	struct hci_dev *hdev = conn->hcon->hdev;
1465 	int ret;
1466 
1467 	/* We need to check whether l2cap_conn is registered. If it is not, we
1468 	 * must not register the l2cap_user. l2cap_conn_del() unregisters
1469 	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1470 	 * relies on the parent hci_conn object to be locked. This itself relies
1471 	 * on the hci_dev object to be locked. So we must lock the hci device
1472 	 * here, too. */
1473 
1474 	hci_dev_lock(hdev);
1475 
1476 	if (user->list.next || user->list.prev) {
1477 		ret = -EINVAL;
1478 		goto out_unlock;
1479 	}
1480 
1481 	/* conn->hchan is NULL after l2cap_conn_del() was called */
1482 	if (!conn->hchan) {
1483 		ret = -ENODEV;
1484 		goto out_unlock;
1485 	}
1486 
1487 	ret = user->probe(conn, user);
1488 	if (ret)
1489 		goto out_unlock;
1490 
1491 	list_add(&user->list, &conn->users);
1492 	ret = 0;
1493 
1494 out_unlock:
1495 	hci_dev_unlock(hdev);
1496 	return ret;
1497 }
1498 EXPORT_SYMBOL(l2cap_register_user);
1499 
1500 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1501 {
1502 	struct hci_dev *hdev = conn->hcon->hdev;
1503 
1504 	hci_dev_lock(hdev);
1505 
1506 	if (!user->list.next || !user->list.prev)
1507 		goto out_unlock;
1508 
1509 	list_del(&user->list);
1510 	user->list.next = NULL;
1511 	user->list.prev = NULL;
1512 	user->remove(conn, user);
1513 
1514 out_unlock:
1515 	hci_dev_unlock(hdev);
1516 }
1517 EXPORT_SYMBOL(l2cap_unregister_user);
1518 
1519 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1520 {
1521 	struct l2cap_user *user;
1522 
1523 	while (!list_empty(&conn->users)) {
1524 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1525 		list_del(&user->list);
1526 		user->list.next = NULL;
1527 		user->list.prev = NULL;
1528 		user->remove(conn, user);
1529 	}
1530 }
1531 
1532 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1533 {
1534 	struct l2cap_conn *conn = hcon->l2cap_data;
1535 	struct l2cap_chan *chan, *l;
1536 
1537 	if (!conn)
1538 		return;
1539 
1540 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1541 
1542 	kfree_skb(conn->rx_skb);
1543 
1544 	l2cap_unregister_all_users(conn);
1545 
1546 	mutex_lock(&conn->chan_lock);
1547 
1548 	/* Kill channels */
1549 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1550 		l2cap_chan_hold(chan);
1551 		l2cap_chan_lock(chan);
1552 
1553 		l2cap_chan_del(chan, err);
1554 
1555 		l2cap_chan_unlock(chan);
1556 
1557 		chan->ops->close(chan);
1558 		l2cap_chan_put(chan);
1559 	}
1560 
1561 	mutex_unlock(&conn->chan_lock);
1562 
1563 	hci_chan_del(conn->hchan);
1564 
1565 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1566 		cancel_delayed_work_sync(&conn->info_timer);
1567 
1568 	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1569 		cancel_delayed_work_sync(&conn->security_timer);
1570 		smp_chan_destroy(conn);
1571 	}
1572 
1573 	hcon->l2cap_data = NULL;
1574 	conn->hchan = NULL;
1575 	l2cap_conn_put(conn);
1576 }
1577 
1578 static void security_timeout(struct work_struct *work)
1579 {
1580 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1581 					       security_timer.work);
1582 
1583 	BT_DBG("conn %p", conn);
1584 
1585 	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1586 		smp_chan_destroy(conn);
1587 		l2cap_conn_del(conn->hcon, ETIMEDOUT);
1588 	}
1589 }
1590 
1591 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
1592 {
1593 	struct l2cap_conn *conn = hcon->l2cap_data;
1594 	struct hci_chan *hchan;
1595 
1596 	if (conn)
1597 		return conn;
1598 
1599 	hchan = hci_chan_create(hcon);
1600 	if (!hchan)
1601 		return NULL;
1602 
1603 	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
1604 	if (!conn) {
1605 		hci_chan_del(hchan);
1606 		return NULL;
1607 	}
1608 
1609 	kref_init(&conn->ref);
1610 	hcon->l2cap_data = conn;
1611 	conn->hcon = hcon;
1612 	hci_conn_get(conn->hcon);
1613 	conn->hchan = hchan;
1614 
1615 	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1616 
1617 	switch (hcon->type) {
1618 	case LE_LINK:
1619 		if (hcon->hdev->le_mtu) {
1620 			conn->mtu = hcon->hdev->le_mtu;
1621 			break;
1622 		}
1623 		/* fall through */
1624 	default:
1625 		conn->mtu = hcon->hdev->acl_mtu;
1626 		break;
1627 	}
1628 
1629 	conn->src = &hcon->hdev->bdaddr;
1630 	conn->dst = &hcon->dst;
1631 
1632 	conn->feat_mask = 0;
1633 
1634 	spin_lock_init(&conn->lock);
1635 	mutex_init(&conn->chan_lock);
1636 
1637 	INIT_LIST_HEAD(&conn->chan_l);
1638 	INIT_LIST_HEAD(&conn->users);
1639 
1640 	if (hcon->type == LE_LINK)
1641 		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1642 	else
1643 		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1644 
1645 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1646 
1647 	return conn;
1648 }
1649 
1650 static void l2cap_conn_free(struct kref *ref)
1651 {
1652 	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1653 
1654 	hci_conn_put(conn->hcon);
1655 	kfree(conn);
1656 }
1657 
1658 void l2cap_conn_get(struct l2cap_conn *conn)
1659 {
1660 	kref_get(&conn->ref);
1661 }
1662 EXPORT_SYMBOL(l2cap_conn_get);
1663 
1664 void l2cap_conn_put(struct l2cap_conn *conn)
1665 {
1666 	kref_put(&conn->ref, l2cap_conn_free);
1667 }
1668 EXPORT_SYMBOL(l2cap_conn_put);
1669 
1670 /* ---- Socket interface ---- */
1671 
1672 /* Find socket with psm and source/destination bdaddr.
1673  * Returns closest match.
1674  */
1675 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1676 						   bdaddr_t *src,
1677 						   bdaddr_t *dst)
1678 {
1679 	struct l2cap_chan *c, *c1 = NULL;
1680 
1681 	read_lock(&chan_list_lock);
1682 
1683 	list_for_each_entry(c, &chan_list, global_l) {
1684 		struct sock *sk = c->sk;
1685 
1686 		if (state && c->state != state)
1687 			continue;
1688 
1689 		if (c->psm == psm) {
1690 			int src_match, dst_match;
1691 			int src_any, dst_any;
1692 
1693 			/* Exact match. */
1694 			src_match = !bacmp(&bt_sk(sk)->src, src);
1695 			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1696 			if (src_match && dst_match) {
1697 				read_unlock(&chan_list_lock);
1698 				return c;
1699 			}
1700 
1701 			/* Closest match */
1702 			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1703 			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1704 			if ((src_match && dst_any) || (src_any && dst_match) ||
1705 			    (src_any && dst_any))
1706 				c1 = c;
1707 		}
1708 	}
1709 
1710 	read_unlock(&chan_list_lock);
1711 
1712 	return c1;
1713 }
1714 
1715 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1716 		       bdaddr_t *dst, u8 dst_type)
1717 {
1718 	struct sock *sk = chan->sk;
1719 	bdaddr_t *src = &bt_sk(sk)->src;
1720 	struct l2cap_conn *conn;
1721 	struct hci_conn *hcon;
1722 	struct hci_dev *hdev;
1723 	__u8 auth_type;
1724 	int err;
1725 
1726 	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
1727 	       dst_type, __le16_to_cpu(psm));
1728 
1729 	hdev = hci_get_route(dst, src);
1730 	if (!hdev)
1731 		return -EHOSTUNREACH;
1732 
1733 	hci_dev_lock(hdev);
1734 
1735 	l2cap_chan_lock(chan);
1736 
1737 	/* PSM must be odd and lsb of upper byte must be 0 */
1738 	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1739 	    chan->chan_type != L2CAP_CHAN_RAW) {
1740 		err = -EINVAL;
1741 		goto done;
1742 	}
1743 
1744 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1745 		err = -EINVAL;
1746 		goto done;
1747 	}
1748 
1749 	switch (chan->mode) {
1750 	case L2CAP_MODE_BASIC:
1751 		break;
1752 	case L2CAP_MODE_ERTM:
1753 	case L2CAP_MODE_STREAMING:
1754 		if (!disable_ertm)
1755 			break;
1756 		/* fall through */
1757 	default:
1758 		err = -ENOTSUPP;
1759 		goto done;
1760 	}
1761 
1762 	switch (chan->state) {
1763 	case BT_CONNECT:
1764 	case BT_CONNECT2:
1765 	case BT_CONFIG:
1766 		/* Already connecting */
1767 		err = 0;
1768 		goto done;
1769 
1770 	case BT_CONNECTED:
1771 		/* Already connected */
1772 		err = -EISCONN;
1773 		goto done;
1774 
1775 	case BT_OPEN:
1776 	case BT_BOUND:
1777 		/* Can connect */
1778 		break;
1779 
1780 	default:
1781 		err = -EBADFD;
1782 		goto done;
1783 	}
1784 
1785 	/* Set destination address and psm */
1786 	lock_sock(sk);
1787 	bacpy(&bt_sk(sk)->dst, dst);
1788 	release_sock(sk);
1789 
1790 	chan->psm = psm;
1791 	chan->dcid = cid;
1792 
1793 	auth_type = l2cap_get_auth_type(chan);
1794 
1795 	if (chan->dcid == L2CAP_CID_LE_DATA)
1796 		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1797 				   chan->sec_level, auth_type);
1798 	else
1799 		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1800 				   chan->sec_level, auth_type);
1801 
1802 	if (IS_ERR(hcon)) {
1803 		err = PTR_ERR(hcon);
1804 		goto done;
1805 	}
1806 
1807 	conn = l2cap_conn_add(hcon);
1808 	if (!conn) {
1809 		hci_conn_drop(hcon);
1810 		err = -ENOMEM;
1811 		goto done;
1812 	}
1813 
1814 	if (hcon->type == LE_LINK) {
1815 		err = 0;
1816 
1817 		if (!list_empty(&conn->chan_l)) {
1818 			err = -EBUSY;
1819 			hci_conn_drop(hcon);
1820 		}
1821 
1822 		if (err)
1823 			goto done;
1824 	}
1825 
1826 	/* Update source addr of the socket */
1827 	bacpy(src, conn->src);
1828 
1829 	l2cap_chan_unlock(chan);
1830 	l2cap_chan_add(conn, chan);
1831 	l2cap_chan_lock(chan);
1832 
1833 	l2cap_state_change(chan, BT_CONNECT);
1834 	__set_chan_timer(chan, sk->sk_sndtimeo);
1835 
1836 	if (hcon->state == BT_CONNECTED) {
1837 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1838 			__clear_chan_timer(chan);
1839 			if (l2cap_chan_check_security(chan))
1840 				l2cap_state_change(chan, BT_CONNECTED);
1841 		} else
1842 			l2cap_do_start(chan);
1843 	}
1844 
1845 	err = 0;
1846 
1847 done:
1848 	l2cap_chan_unlock(chan);
1849 	hci_dev_unlock(hdev);
1850 	hci_dev_put(hdev);
1851 	return err;
1852 }
1853 
1854 int __l2cap_wait_ack(struct sock *sk)
1855 {
1856 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1857 	DECLARE_WAITQUEUE(wait, current);
1858 	int err = 0;
1859 	int timeo = HZ/5;
1860 
1861 	add_wait_queue(sk_sleep(sk), &wait);
1862 	set_current_state(TASK_INTERRUPTIBLE);
1863 	while (chan->unacked_frames > 0 && chan->conn) {
1864 		if (!timeo)
1865 			timeo = HZ/5;
1866 
1867 		if (signal_pending(current)) {
1868 			err = sock_intr_errno(timeo);
1869 			break;
1870 		}
1871 
1872 		release_sock(sk);
1873 		timeo = schedule_timeout(timeo);
1874 		lock_sock(sk);
1875 		set_current_state(TASK_INTERRUPTIBLE);
1876 
1877 		err = sock_error(sk);
1878 		if (err)
1879 			break;
1880 	}
1881 	set_current_state(TASK_RUNNING);
1882 	remove_wait_queue(sk_sleep(sk), &wait);
1883 	return err;
1884 }
1885 
1886 static void l2cap_monitor_timeout(struct work_struct *work)
1887 {
1888 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1889 					       monitor_timer.work);
1890 
1891 	BT_DBG("chan %p", chan);
1892 
1893 	l2cap_chan_lock(chan);
1894 
1895 	if (!chan->conn) {
1896 		l2cap_chan_unlock(chan);
1897 		l2cap_chan_put(chan);
1898 		return;
1899 	}
1900 
1901 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1902 
1903 	l2cap_chan_unlock(chan);
1904 	l2cap_chan_put(chan);
1905 }
1906 
1907 static void l2cap_retrans_timeout(struct work_struct *work)
1908 {
1909 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1910 					       retrans_timer.work);
1911 
1912 	BT_DBG("chan %p", chan);
1913 
1914 	l2cap_chan_lock(chan);
1915 
1916 	if (!chan->conn) {
1917 		l2cap_chan_unlock(chan);
1918 		l2cap_chan_put(chan);
1919 		return;
1920 	}
1921 
1922 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1923 	l2cap_chan_unlock(chan);
1924 	l2cap_chan_put(chan);
1925 }
1926 
1927 static void l2cap_streaming_send(struct l2cap_chan *chan,
1928 				 struct sk_buff_head *skbs)
1929 {
1930 	struct sk_buff *skb;
1931 	struct l2cap_ctrl *control;
1932 
1933 	BT_DBG("chan %p, skbs %p", chan, skbs);
1934 
1935 	if (__chan_is_moving(chan))
1936 		return;
1937 
1938 	skb_queue_splice_tail_init(skbs, &chan->tx_q);
1939 
1940 	while (!skb_queue_empty(&chan->tx_q)) {
1941 
1942 		skb = skb_dequeue(&chan->tx_q);
1943 
1944 		bt_cb(skb)->control.retries = 1;
1945 		control = &bt_cb(skb)->control;
1946 
1947 		control->reqseq = 0;
1948 		control->txseq = chan->next_tx_seq;
1949 
1950 		__pack_control(chan, control, skb);
1951 
1952 		if (chan->fcs == L2CAP_FCS_CRC16) {
1953 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1954 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1955 		}
1956 
1957 		l2cap_do_send(chan, skb);
1958 
1959 		BT_DBG("Sent txseq %u", control->txseq);
1960 
1961 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1962 		chan->frames_sent++;
1963 	}
1964 }
1965 
1966 static int l2cap_ertm_send(struct l2cap_chan *chan)
1967 {
1968 	struct sk_buff *skb, *tx_skb;
1969 	struct l2cap_ctrl *control;
1970 	int sent = 0;
1971 
1972 	BT_DBG("chan %p", chan);
1973 
1974 	if (chan->state != BT_CONNECTED)
1975 		return -ENOTCONN;
1976 
1977 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1978 		return 0;
1979 
1980 	if (__chan_is_moving(chan))
1981 		return 0;
1982 
1983 	while (chan->tx_send_head &&
1984 	       chan->unacked_frames < chan->remote_tx_win &&
1985 	       chan->tx_state == L2CAP_TX_STATE_XMIT) {
1986 
1987 		skb = chan->tx_send_head;
1988 
1989 		bt_cb(skb)->control.retries = 1;
1990 		control = &bt_cb(skb)->control;
1991 
1992 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1993 			control->final = 1;
1994 
1995 		control->reqseq = chan->buffer_seq;
1996 		chan->last_acked_seq = chan->buffer_seq;
1997 		control->txseq = chan->next_tx_seq;
1998 
1999 		__pack_control(chan, control, skb);
2000 
2001 		if (chan->fcs == L2CAP_FCS_CRC16) {
2002 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2003 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2004 		}
2005 
2006 		/* Clone after data has been modified. Data is assumed to be
2007 		   read-only (for locking purposes) on cloned sk_buffs.
2008 		 */
2009 		tx_skb = skb_clone(skb, GFP_KERNEL);
2010 
2011 		if (!tx_skb)
2012 			break;
2013 
2014 		__set_retrans_timer(chan);
2015 
2016 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2017 		chan->unacked_frames++;
2018 		chan->frames_sent++;
2019 		sent++;
2020 
2021 		if (skb_queue_is_last(&chan->tx_q, skb))
2022 			chan->tx_send_head = NULL;
2023 		else
2024 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2025 
2026 		l2cap_do_send(chan, tx_skb);
2027 		BT_DBG("Sent txseq %u", control->txseq);
2028 	}
2029 
2030 	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2031 	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
2032 
2033 	return sent;
2034 }
2035 
2036 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2037 {
2038 	struct l2cap_ctrl control;
2039 	struct sk_buff *skb;
2040 	struct sk_buff *tx_skb;
2041 	u16 seq;
2042 
2043 	BT_DBG("chan %p", chan);
2044 
2045 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2046 		return;
2047 
2048 	if (__chan_is_moving(chan))
2049 		return;
2050 
2051 	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2052 		seq = l2cap_seq_list_pop(&chan->retrans_list);
2053 
2054 		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2055 		if (!skb) {
2056 			BT_DBG("Error: Can't retransmit seq %d, frame missing",
2057 			       seq);
2058 			continue;
2059 		}
2060 
2061 		bt_cb(skb)->control.retries++;
2062 		control = bt_cb(skb)->control;
2063 
2064 		if (chan->max_tx != 0 &&
2065 		    bt_cb(skb)->control.retries > chan->max_tx) {
2066 			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2067 			l2cap_send_disconn_req(chan, ECONNRESET);
2068 			l2cap_seq_list_clear(&chan->retrans_list);
2069 			break;
2070 		}
2071 
2072 		control.reqseq = chan->buffer_seq;
2073 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2074 			control.final = 1;
2075 		else
2076 			control.final = 0;
2077 
2078 		if (skb_cloned(skb)) {
2079 			/* Cloned sk_buffs are read-only, so we need a
2080 			 * writable copy
2081 			 */
2082 			tx_skb = skb_copy(skb, GFP_KERNEL);
2083 		} else {
2084 			tx_skb = skb_clone(skb, GFP_KERNEL);
2085 		}
2086 
2087 		if (!tx_skb) {
2088 			l2cap_seq_list_clear(&chan->retrans_list);
2089 			break;
2090 		}
2091 
2092 		/* Update skb contents */
2093 		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2094 			put_unaligned_le32(__pack_extended_control(&control),
2095 					   tx_skb->data + L2CAP_HDR_SIZE);
2096 		} else {
2097 			put_unaligned_le16(__pack_enhanced_control(&control),
2098 					   tx_skb->data + L2CAP_HDR_SIZE);
2099 		}
2100 
2101 		if (chan->fcs == L2CAP_FCS_CRC16) {
2102 			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
2103 			put_unaligned_le16(fcs, skb_put(tx_skb,
2104 							L2CAP_FCS_SIZE));
2105 		}
2106 
2107 		l2cap_do_send(chan, tx_skb);
2108 
2109 		BT_DBG("Resent txseq %u", control.txseq);
2110 
2111 		chan->last_acked_seq = chan->buffer_seq;
2112 	}
2113 }
2114 
2115 static void l2cap_retransmit(struct l2cap_chan *chan,
2116 			     struct l2cap_ctrl *control)
2117 {
2118 	BT_DBG("chan %p, control %p", chan, control);
2119 
2120 	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2121 	l2cap_ertm_resend(chan);
2122 }
2123 
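/* Queue all unacknowledged I-frames from reqseq up to (but not
 * including) tx_send_head for retransmission, then resend them.
 */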
2124 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2125 				 struct l2cap_ctrl *control)
2126 {
2127 	struct sk_buff *skb;
2128 
2129 	BT_DBG("chan %p, control %p", chan, control);
2130 
2131 	if (control->poll)
2132 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
2133 
2134 	l2cap_seq_list_clear(&chan->retrans_list);
2135 
2136 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2137 		return;
2138 
2139 	if (chan->unacked_frames) {
2140 		skb_queue_walk(&chan->tx_q, skb) {
2141 			if (bt_cb(skb)->control.txseq == control->reqseq ||
2142 			    skb == chan->tx_send_head)
2143 				break;
2144 		}
2145 
2146 		skb_queue_walk_from(&chan->tx_q, skb) {
2147 			if (skb == chan->tx_send_head)
2148 				break;
2149 
2150 			l2cap_seq_list_append(&chan->retrans_list,
2151 					      bt_cb(skb)->control.txseq);
2152 		}
2153 
2154 		l2cap_ertm_resend(chan);
2155 	}
2156 }
2157 
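/* Acknowledge received I-frames.  When locally busy an RNR is sent
 * immediately; otherwise pending I-frames are sent first (they carry
 * the ack), an RR is sent once roughly 3/4 of the ack window is
 * outstanding, and otherwise the ack timer is armed.
 */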
2158 static void l2cap_send_ack(struct l2cap_chan *chan)
2159 {
2160 	struct l2cap_ctrl control;
2161 	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2162 					 chan->last_acked_seq);
2163 	int threshold;
2164 
2165 	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2166 	       chan, chan->last_acked_seq, chan->buffer_seq);
2167 
2168 	memset(&control, 0, sizeof(control));
2169 	control.sframe = 1;
2170 
2171 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2172 	    chan->rx_state == L2CAP_RX_STATE_RECV) {
2173 		__clear_ack_timer(chan);
2174 		control.super = L2CAP_SUPER_RNR;
2175 		control.reqseq = chan->buffer_seq;
2176 		l2cap_send_sframe(chan, &control);
2177 	} else {
2178 		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2179 			l2cap_ertm_send(chan);
2180 			/* If any i-frames were sent, they included an ack */
2181 			if (chan->buffer_seq == chan->last_acked_seq)
2182 				frames_to_ack = 0;
2183 		}
2184 
2185 		/* Ack now if the window is 3/4 full.
2186 		 * Calculate the threshold without multiply or divide.
2187 		 */
2188 		threshold = chan->ack_win;
2189 		threshold += threshold << 1;
2190 		threshold >>= 2;
2191 
2192 		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2193 		       threshold);
2194 
2195 		if (frames_to_ack >= threshold) {
2196 			__clear_ack_timer(chan);
2197 			control.super = L2CAP_SUPER_RR;
2198 			control.reqseq = chan->buffer_seq;
2199 			l2cap_send_sframe(chan, &control);
2200 			frames_to_ack = 0;
2201 		}
2202 
2203 		if (frames_to_ack)
2204 			__set_ack_timer(chan);
2205 	}
2206 }
2207 
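/* Copy user data from the msghdr iovec into skb, chaining extra
 * fragments on frag_list whenever the payload exceeds the HCI MTU.
 * Returns the number of bytes copied or a negative error.
 */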
2208 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2209 					 struct msghdr *msg, int len,
2210 					 int count, struct sk_buff *skb)
2211 {
2212 	struct l2cap_conn *conn = chan->conn;
2213 	struct sk_buff **frag;
2214 	int sent = 0;
2215 
2216 	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2217 		return -EFAULT;
2218 
2219 	sent += count;
2220 	len  -= count;
2221 
2222 	/* Continuation fragments (no L2CAP header) */
2223 	frag = &skb_shinfo(skb)->frag_list;
2224 	while (len) {
2225 		struct sk_buff *tmp;
2226 
2227 		count = min_t(unsigned int, conn->mtu, len);
2228 
2229 		tmp = chan->ops->alloc_skb(chan, count,
2230 					   msg->msg_flags & MSG_DONTWAIT);
2231 		if (IS_ERR(tmp))
2232 			return PTR_ERR(tmp);
2233 
2234 		*frag = tmp;
2235 
2236 		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2237 			return -EFAULT;
2238 
2239 		(*frag)->priority = skb->priority;
2240 
2241 		sent += count;
2242 		len  -= count;
2243 
2244 		skb->len += (*frag)->len;
2245 		skb->data_len += (*frag)->len;
2246 
2247 		frag = &(*frag)->next;
2248 	}
2249 
2250 	return sent;
2251 }
2252 
2253 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2254 						 struct msghdr *msg, size_t len,
2255 						 u32 priority)
2256 {
2257 	struct l2cap_conn *conn = chan->conn;
2258 	struct sk_buff *skb;
2259 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2260 	struct l2cap_hdr *lh;
2261 
2262 	BT_DBG("chan %p len %zu priority %u", chan, len, priority);
2263 
2264 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2265 
2266 	skb = chan->ops->alloc_skb(chan, count + hlen,
2267 				   msg->msg_flags & MSG_DONTWAIT);
2268 	if (IS_ERR(skb))
2269 		return skb;
2270 
2271 	skb->priority = priority;
2272 
2273 	/* Create L2CAP header */
2274 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2275 	lh->cid = cpu_to_le16(chan->dcid);
2276 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2277 	put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
2278 
2279 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2280 	if (unlikely(err < 0)) {
2281 		kfree_skb(skb);
2282 		return ERR_PTR(err);
2283 	}
2284 	return skb;
2285 }
2286 
2287 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2288 					      struct msghdr *msg, size_t len,
2289 					      u32 priority)
2290 {
2291 	struct l2cap_conn *conn = chan->conn;
2292 	struct sk_buff *skb;
2293 	int err, count;
2294 	struct l2cap_hdr *lh;
2295 
2296 	BT_DBG("chan %p len %zu", chan, len);
2297 
2298 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2299 
2300 	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2301 				   msg->msg_flags & MSG_DONTWAIT);
2302 	if (IS_ERR(skb))
2303 		return skb;
2304 
2305 	skb->priority = priority;
2306 
2307 	/* Create L2CAP header */
2308 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2309 	lh->cid = cpu_to_le16(chan->dcid);
2310 	lh->len = cpu_to_le16(len);
2311 
2312 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2313 	if (unlikely(err < 0)) {
2314 		kfree_skb(skb);
2315 		return ERR_PTR(err);
2316 	}
2317 	return skb;
2318 }
2319 
2320 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2321 					       struct msghdr *msg, size_t len,
2322 					       u16 sdulen)
2323 {
2324 	struct l2cap_conn *conn = chan->conn;
2325 	struct sk_buff *skb;
2326 	int err, count, hlen;
2327 	struct l2cap_hdr *lh;
2328 
2329 	BT_DBG("chan %p len %zu", chan, len);
2330 
2331 	if (!conn)
2332 		return ERR_PTR(-ENOTCONN);
2333 
2334 	hlen = __ertm_hdr_size(chan);
2335 
2336 	if (sdulen)
2337 		hlen += L2CAP_SDULEN_SIZE;
2338 
2339 	if (chan->fcs == L2CAP_FCS_CRC16)
2340 		hlen += L2CAP_FCS_SIZE;
2341 
2342 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2343 
2344 	skb = chan->ops->alloc_skb(chan, count + hlen,
2345 				   msg->msg_flags & MSG_DONTWAIT);
2346 	if (IS_ERR(skb))
2347 		return skb;
2348 
2349 	/* Create L2CAP header */
2350 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2351 	lh->cid = cpu_to_le16(chan->dcid);
2352 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2353 
2354 	/* Control header is populated later */
2355 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2356 		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2357 	else
2358 		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2359 
2360 	if (sdulen)
2361 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2362 
2363 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2364 	if (unlikely(err < 0)) {
2365 		kfree_skb(skb);
2366 		return ERR_PTR(err);
2367 	}
2368 
2369 	bt_cb(skb)->control.fcs = chan->fcs;
2370 	bt_cb(skb)->control.retries = 0;
2371 	return skb;
2372 }
2373 
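/* Segment an outgoing SDU into I-frame PDUs that fit both the HCI MTU
 * and the remote MPS, tagging each PDU with the appropriate SAR value
 * and queueing the result on seg_queue.
 */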
2374 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2375 			     struct sk_buff_head *seg_queue,
2376 			     struct msghdr *msg, size_t len)
2377 {
2378 	struct sk_buff *skb;
2379 	u16 sdu_len;
2380 	size_t pdu_len;
2381 	u8 sar;
2382 
2383 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2384 
2385 	/* It is critical that ERTM PDUs fit in a single HCI fragment,
2386 	 * so fragmented skbs are not used.  The HCI layer's handling
2387 	 * of fragmented skbs is not compatible with ERTM's queueing.
2388 	 */
2389 
2390 	/* PDU size is derived from the HCI MTU */
2391 	pdu_len = chan->conn->mtu;
2392 
2393 	/* Constrain PDU size for BR/EDR connections */
2394 	if (!chan->hs_hcon)
2395 		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2396 
2397 	/* Adjust for largest possible L2CAP overhead. */
2398 	if (chan->fcs)
2399 		pdu_len -= L2CAP_FCS_SIZE;
2400 
2401 	pdu_len -= __ertm_hdr_size(chan);
2402 
2403 	/* Remote device may have requested smaller PDUs */
2404 	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2405 
2406 	if (len <= pdu_len) {
2407 		sar = L2CAP_SAR_UNSEGMENTED;
2408 		sdu_len = 0;
2409 		pdu_len = len;
2410 	} else {
2411 		sar = L2CAP_SAR_START;
2412 		sdu_len = len;
2413 		pdu_len -= L2CAP_SDULEN_SIZE;
2414 	}
2415 
2416 	while (len > 0) {
2417 		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2418 
2419 		if (IS_ERR(skb)) {
2420 			__skb_queue_purge(seg_queue);
2421 			return PTR_ERR(skb);
2422 		}
2423 
2424 		bt_cb(skb)->control.sar = sar;
2425 		__skb_queue_tail(seg_queue, skb);
2426 
2427 		len -= pdu_len;
2428 		if (sdu_len) {
2429 			sdu_len = 0;
2430 			pdu_len += L2CAP_SDULEN_SIZE;
2431 		}
2432 
2433 		if (len <= pdu_len) {
2434 			sar = L2CAP_SAR_END;
2435 			pdu_len = len;
2436 		} else {
2437 			sar = L2CAP_SAR_CONTINUE;
2438 		}
2439 	}
2440 
2441 	return 0;
2442 }
2443 
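/* Send data on a channel.  Connectionless channels emit a single
 * connectionless PDU, basic mode sends one B-frame, and ERTM/streaming
 * modes segment the SDU before handing it to the TX state machine or
 * the streaming transmitter.
 */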
2444 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2445 		    u32 priority)
2446 {
2447 	struct sk_buff *skb;
2448 	int err;
2449 	struct sk_buff_head seg_queue;
2450 
2451 	/* Connectionless channel */
2452 	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2453 		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2454 		if (IS_ERR(skb))
2455 			return PTR_ERR(skb);
2456 
2457 		l2cap_do_send(chan, skb);
2458 		return len;
2459 	}
2460 
2461 	switch (chan->mode) {
2462 	case L2CAP_MODE_BASIC:
2463 		/* Check outgoing MTU */
2464 		if (len > chan->omtu)
2465 			return -EMSGSIZE;
2466 
2467 		/* Create a basic PDU */
2468 		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2469 		if (IS_ERR(skb))
2470 			return PTR_ERR(skb);
2471 
2472 		l2cap_do_send(chan, skb);
2473 		err = len;
2474 		break;
2475 
2476 	case L2CAP_MODE_ERTM:
2477 	case L2CAP_MODE_STREAMING:
2478 		/* Check outgoing MTU */
2479 		if (len > chan->omtu) {
2480 			err = -EMSGSIZE;
2481 			break;
2482 		}
2483 
2484 		__skb_queue_head_init(&seg_queue);
2485 
2486 		/* Do segmentation before calling into the state machine,
2487 		 * since it's possible to block while waiting for memory
2488 		 * allocation.
2489 		 */
2490 		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2491 
2492 		/* The channel could have been closed while segmenting,
2493 		 * so check that it is still connected.
2494 		 */
2495 		if (chan->state != BT_CONNECTED) {
2496 			__skb_queue_purge(&seg_queue);
2497 			err = -ENOTCONN;
2498 		}
2499 
2500 		if (err)
2501 			break;
2502 
2503 		if (chan->mode == L2CAP_MODE_ERTM)
2504 			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2505 		else
2506 			l2cap_streaming_send(chan, &seg_queue);
2507 
2508 		err = len;
2509 
2510 		/* If the skbs were not queued for sending, they'll still be in
2511 		 * seg_queue and need to be purged.
2512 		 */
2513 		__skb_queue_purge(&seg_queue);
2514 		break;
2515 
2516 	default:
2517 		BT_DBG("bad mode 0x%1.1x", chan->mode);
2518 		err = -EBADFD;
2519 	}
2520 
2521 	return err;
2522 }
2523 
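/* Send an SREJ for every sequence number between expected_tx_seq and
 * txseq that has not already been received out of order, recording
 * each request on srej_list.
 */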
2524 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2525 {
2526 	struct l2cap_ctrl control;
2527 	u16 seq;
2528 
2529 	BT_DBG("chan %p, txseq %u", chan, txseq);
2530 
2531 	memset(&control, 0, sizeof(control));
2532 	control.sframe = 1;
2533 	control.super = L2CAP_SUPER_SREJ;
2534 
2535 	for (seq = chan->expected_tx_seq; seq != txseq;
2536 	     seq = __next_seq(chan, seq)) {
2537 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2538 			control.reqseq = seq;
2539 			l2cap_send_sframe(chan, &control);
2540 			l2cap_seq_list_append(&chan->srej_list, seq);
2541 		}
2542 	}
2543 
2544 	chan->expected_tx_seq = __next_seq(chan, txseq);
2545 }
2546 
2547 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2548 {
2549 	struct l2cap_ctrl control;
2550 
2551 	BT_DBG("chan %p", chan);
2552 
2553 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2554 		return;
2555 
2556 	memset(&control, 0, sizeof(control));
2557 	control.sframe = 1;
2558 	control.super = L2CAP_SUPER_SREJ;
2559 	control.reqseq = chan->srej_list.tail;
2560 	l2cap_send_sframe(chan, &control);
2561 }
2562 
2563 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2564 {
2565 	struct l2cap_ctrl control;
2566 	u16 initial_head;
2567 	u16 seq;
2568 
2569 	BT_DBG("chan %p, txseq %u", chan, txseq);
2570 
2571 	memset(&control, 0, sizeof(control));
2572 	control.sframe = 1;
2573 	control.super = L2CAP_SUPER_SREJ;
2574 
2575 	/* Capture initial list head to allow only one pass through the list. */
2576 	initial_head = chan->srej_list.head;
2577 
2578 	do {
2579 		seq = l2cap_seq_list_pop(&chan->srej_list);
2580 		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2581 			break;
2582 
2583 		control.reqseq = seq;
2584 		l2cap_send_sframe(chan, &control);
2585 		l2cap_seq_list_append(&chan->srej_list, seq);
2586 	} while (chan->srej_list.head != initial_head);
2587 }
2588 
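/* Process an incoming acknowledgement: free all frames up to (but not
 * including) reqseq from the TX queue and stop the retransmission
 * timer once nothing is left unacknowledged.
 */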
2589 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2590 {
2591 	struct sk_buff *acked_skb;
2592 	u16 ackseq;
2593 
2594 	BT_DBG("chan %p, reqseq %u", chan, reqseq);
2595 
2596 	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2597 		return;
2598 
2599 	BT_DBG("expected_ack_seq %u, unacked_frames %u",
2600 	       chan->expected_ack_seq, chan->unacked_frames);
2601 
2602 	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2603 	     ackseq = __next_seq(chan, ackseq)) {
2604 
2605 		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2606 		if (acked_skb) {
2607 			skb_unlink(acked_skb, &chan->tx_q);
2608 			kfree_skb(acked_skb);
2609 			chan->unacked_frames--;
2610 		}
2611 	}
2612 
2613 	chan->expected_ack_seq = reqseq;
2614 
2615 	if (chan->unacked_frames == 0)
2616 		__clear_retrans_timer(chan);
2617 
2618 	BT_DBG("unacked_frames %u", chan->unacked_frames);
2619 }
2620 
2621 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2622 {
2623 	BT_DBG("chan %p", chan);
2624 
2625 	chan->expected_tx_seq = chan->buffer_seq;
2626 	l2cap_seq_list_clear(&chan->srej_list);
2627 	skb_queue_purge(&chan->srej_q);
2628 	chan->rx_state = L2CAP_RX_STATE_RECV;
2629 }
2630 
2631 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2632 				struct l2cap_ctrl *control,
2633 				struct sk_buff_head *skbs, u8 event)
2634 {
2635 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2636 	       event);
2637 
2638 	switch (event) {
2639 	case L2CAP_EV_DATA_REQUEST:
2640 		if (chan->tx_send_head == NULL)
2641 			chan->tx_send_head = skb_peek(skbs);
2642 
2643 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2644 		l2cap_ertm_send(chan);
2645 		break;
2646 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2647 		BT_DBG("Enter LOCAL_BUSY");
2648 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2649 
2650 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2651 			/* The SREJ_SENT state must be aborted if we are to
2652 			 * enter the LOCAL_BUSY state.
2653 			 */
2654 			l2cap_abort_rx_srej_sent(chan);
2655 		}
2656 
2657 		l2cap_send_ack(chan);
2658 
2659 		break;
2660 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2661 		BT_DBG("Exit LOCAL_BUSY");
2662 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2663 
2664 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2665 			struct l2cap_ctrl local_control;
2666 
2667 			memset(&local_control, 0, sizeof(local_control));
2668 			local_control.sframe = 1;
2669 			local_control.super = L2CAP_SUPER_RR;
2670 			local_control.poll = 1;
2671 			local_control.reqseq = chan->buffer_seq;
2672 			l2cap_send_sframe(chan, &local_control);
2673 
2674 			chan->retry_count = 1;
2675 			__set_monitor_timer(chan);
2676 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2677 		}
2678 		break;
2679 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2680 		l2cap_process_reqseq(chan, control->reqseq);
2681 		break;
2682 	case L2CAP_EV_EXPLICIT_POLL:
2683 		l2cap_send_rr_or_rnr(chan, 1);
2684 		chan->retry_count = 1;
2685 		__set_monitor_timer(chan);
2686 		__clear_ack_timer(chan);
2687 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2688 		break;
2689 	case L2CAP_EV_RETRANS_TO:
2690 		l2cap_send_rr_or_rnr(chan, 1);
2691 		chan->retry_count = 1;
2692 		__set_monitor_timer(chan);
2693 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2694 		break;
2695 	case L2CAP_EV_RECV_FBIT:
2696 		/* Nothing to process */
2697 		break;
2698 	default:
2699 		break;
2700 	}
2701 }
2702 
2703 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2704 				  struct l2cap_ctrl *control,
2705 				  struct sk_buff_head *skbs, u8 event)
2706 {
2707 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2708 	       event);
2709 
2710 	switch (event) {
2711 	case L2CAP_EV_DATA_REQUEST:
2712 		if (chan->tx_send_head == NULL)
2713 			chan->tx_send_head = skb_peek(skbs);
2714 		/* Queue data, but don't send. */
2715 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2716 		break;
2717 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2718 		BT_DBG("Enter LOCAL_BUSY");
2719 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2720 
2721 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2722 			/* The SREJ_SENT state must be aborted if we are to
2723 			 * enter the LOCAL_BUSY state.
2724 			 */
2725 			l2cap_abort_rx_srej_sent(chan);
2726 		}
2727 
2728 		l2cap_send_ack(chan);
2729 
2730 		break;
2731 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2732 		BT_DBG("Exit LOCAL_BUSY");
2733 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2734 
2735 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2736 			struct l2cap_ctrl local_control;
2737 			memset(&local_control, 0, sizeof(local_control));
2738 			local_control.sframe = 1;
2739 			local_control.super = L2CAP_SUPER_RR;
2740 			local_control.poll = 1;
2741 			local_control.reqseq = chan->buffer_seq;
2742 			l2cap_send_sframe(chan, &local_control);
2743 
2744 			chan->retry_count = 1;
2745 			__set_monitor_timer(chan);
2746 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2747 		}
2748 		break;
2749 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2750 		l2cap_process_reqseq(chan, control->reqseq);
2751 
2752 		/* Fall through */
2753 
2754 	case L2CAP_EV_RECV_FBIT:
2755 		if (control && control->final) {
2756 			__clear_monitor_timer(chan);
2757 			if (chan->unacked_frames > 0)
2758 				__set_retrans_timer(chan);
2759 			chan->retry_count = 0;
2760 			chan->tx_state = L2CAP_TX_STATE_XMIT;
2761 			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2762 		}
2763 		break;
2764 	case L2CAP_EV_EXPLICIT_POLL:
2765 		/* Ignore */
2766 		break;
2767 	case L2CAP_EV_MONITOR_TO:
2768 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2769 			l2cap_send_rr_or_rnr(chan, 1);
2770 			__set_monitor_timer(chan);
2771 			chan->retry_count++;
2772 		} else {
2773 			l2cap_send_disconn_req(chan, ECONNABORTED);
2774 		}
2775 		break;
2776 	default:
2777 		break;
2778 	}
2779 }
2780 
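/* ERTM transmit state machine entry point: dispatch the event to the
 * handler for the channel's current TX state.
 */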
2781 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2782 		     struct sk_buff_head *skbs, u8 event)
2783 {
2784 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2785 	       chan, control, skbs, event, chan->tx_state);
2786 
2787 	switch (chan->tx_state) {
2788 	case L2CAP_TX_STATE_XMIT:
2789 		l2cap_tx_state_xmit(chan, control, skbs, event);
2790 		break;
2791 	case L2CAP_TX_STATE_WAIT_F:
2792 		l2cap_tx_state_wait_f(chan, control, skbs, event);
2793 		break;
2794 	default:
2795 		/* Ignore event */
2796 		break;
2797 	}
2798 }
2799 
2800 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2801 			     struct l2cap_ctrl *control)
2802 {
2803 	BT_DBG("chan %p, control %p", chan, control);
2804 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2805 }
2806 
2807 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2808 				  struct l2cap_ctrl *control)
2809 {
2810 	BT_DBG("chan %p, control %p", chan, control);
2811 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2812 }
2813 
2814 /* Copy frame to all raw sockets on that connection */
2815 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2816 {
2817 	struct sk_buff *nskb;
2818 	struct l2cap_chan *chan;
2819 
2820 	BT_DBG("conn %p", conn);
2821 
2822 	mutex_lock(&conn->chan_lock);
2823 
2824 	list_for_each_entry(chan, &conn->chan_l, list) {
2825 		struct sock *sk = chan->sk;
2826 		if (chan->chan_type != L2CAP_CHAN_RAW)
2827 			continue;
2828 
2829 		/* Don't send frame to the socket it came from */
2830 		if (skb->sk == sk)
2831 			continue;
2832 		nskb = skb_clone(skb, GFP_KERNEL);
2833 		if (!nskb)
2834 			continue;
2835 
2836 		if (chan->ops->recv(chan, nskb))
2837 			kfree_skb(nskb);
2838 	}
2839 
2840 	mutex_unlock(&conn->chan_lock);
2841 }
2842 
2843 /* ---- L2CAP signalling commands ---- */
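/* Build a signalling command PDU on the signalling channel (or the LE
 * signalling channel for LE links).  Any payload beyond the ACL MTU is
 * carried in continuation fragments.
 */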
2844 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2845 				       u8 ident, u16 dlen, void *data)
2846 {
2847 	struct sk_buff *skb, **frag;
2848 	struct l2cap_cmd_hdr *cmd;
2849 	struct l2cap_hdr *lh;
2850 	int len, count;
2851 
2852 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2853 	       conn, code, ident, dlen);
2854 
2855 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2856 	count = min_t(unsigned int, conn->mtu, len);
2857 
2858 	skb = bt_skb_alloc(count, GFP_KERNEL);
2859 	if (!skb)
2860 		return NULL;
2861 
2862 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2863 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2864 
2865 	if (conn->hcon->type == LE_LINK)
2866 		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2867 	else
2868 		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2869 
2870 	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2871 	cmd->code  = code;
2872 	cmd->ident = ident;
2873 	cmd->len   = cpu_to_le16(dlen);
2874 
2875 	if (dlen) {
2876 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2877 		memcpy(skb_put(skb, count), data, count);
2878 		data += count;
2879 	}
2880 
2881 	len -= skb->len;
2882 
2883 	/* Continuation fragments (no L2CAP header) */
2884 	frag = &skb_shinfo(skb)->frag_list;
2885 	while (len) {
2886 		count = min_t(unsigned int, conn->mtu, len);
2887 
2888 		*frag = bt_skb_alloc(count, GFP_KERNEL);
2889 		if (!*frag)
2890 			goto fail;
2891 
2892 		memcpy(skb_put(*frag, count), data, count);
2893 
2894 		len  -= count;
2895 		data += count;
2896 
2897 		frag = &(*frag)->next;
2898 	}
2899 
2900 	return skb;
2901 
2902 fail:
2903 	kfree_skb(skb);
2904 	return NULL;
2905 }
2906 
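/* Parse a single configuration option at *ptr and advance the pointer
 * past it.  Values of 1, 2 or 4 bytes are returned by value; anything
 * larger is returned as a pointer to the option data.
 */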
2907 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2908 				     unsigned long *val)
2909 {
2910 	struct l2cap_conf_opt *opt = *ptr;
2911 	int len;
2912 
2913 	len = L2CAP_CONF_OPT_SIZE + opt->len;
2914 	*ptr += len;
2915 
2916 	*type = opt->type;
2917 	*olen = opt->len;
2918 
2919 	switch (opt->len) {
2920 	case 1:
2921 		*val = *((u8 *) opt->val);
2922 		break;
2923 
2924 	case 2:
2925 		*val = get_unaligned_le16(opt->val);
2926 		break;
2927 
2928 	case 4:
2929 		*val = get_unaligned_le32(opt->val);
2930 		break;
2931 
2932 	default:
2933 		*val = (unsigned long) opt->val;
2934 		break;
2935 	}
2936 
2937 	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
2938 	return len;
2939 }
2940 
2941 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2942 {
2943 	struct l2cap_conf_opt *opt = *ptr;
2944 
2945 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2946 
2947 	opt->type = type;
2948 	opt->len  = len;
2949 
2950 	switch (len) {
2951 	case 1:
2952 		*((u8 *) opt->val)  = val;
2953 		break;
2954 
2955 	case 2:
2956 		put_unaligned_le16(val, opt->val);
2957 		break;
2958 
2959 	case 4:
2960 		put_unaligned_le32(val, opt->val);
2961 		break;
2962 
2963 	default:
2964 		memcpy(opt->val, (void *) val, len);
2965 		break;
2966 	}
2967 
2968 	*ptr += L2CAP_CONF_OPT_SIZE + len;
2969 }
2970 
2971 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2972 {
2973 	struct l2cap_conf_efs efs;
2974 
2975 	switch (chan->mode) {
2976 	case L2CAP_MODE_ERTM:
2977 		efs.id		= chan->local_id;
2978 		efs.stype	= chan->local_stype;
2979 		efs.msdu	= cpu_to_le16(chan->local_msdu);
2980 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
2981 		efs.acc_lat	= __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2982 		efs.flush_to	= __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2983 		break;
2984 
2985 	case L2CAP_MODE_STREAMING:
2986 		efs.id		= 1;
2987 		efs.stype	= L2CAP_SERV_BESTEFFORT;
2988 		efs.msdu	= cpu_to_le16(chan->local_msdu);
2989 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
2990 		efs.acc_lat	= 0;
2991 		efs.flush_to	= 0;
2992 		break;
2993 
2994 	default:
2995 		return;
2996 	}
2997 
2998 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2999 			   (unsigned long) &efs);
3000 }
3001 
3002 static void l2cap_ack_timeout(struct work_struct *work)
3003 {
3004 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3005 					       ack_timer.work);
3006 	u16 frames_to_ack;
3007 
3008 	BT_DBG("chan %p", chan);
3009 
3010 	l2cap_chan_lock(chan);
3011 
3012 	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3013 				     chan->last_acked_seq);
3014 
3015 	if (frames_to_ack)
3016 		l2cap_send_rr_or_rnr(chan, 0);
3017 
3018 	l2cap_chan_unlock(chan);
3019 	l2cap_chan_put(chan);
3020 }
3021 
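/* Reset all ERTM/streaming sequence state for a channel.  For ERTM the
 * timers, SREJ queue and sequence lists are also initialised.
 */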
3022 int l2cap_ertm_init(struct l2cap_chan *chan)
3023 {
3024 	int err;
3025 
3026 	chan->next_tx_seq = 0;
3027 	chan->expected_tx_seq = 0;
3028 	chan->expected_ack_seq = 0;
3029 	chan->unacked_frames = 0;
3030 	chan->buffer_seq = 0;
3031 	chan->frames_sent = 0;
3032 	chan->last_acked_seq = 0;
3033 	chan->sdu = NULL;
3034 	chan->sdu_last_frag = NULL;
3035 	chan->sdu_len = 0;
3036 
3037 	skb_queue_head_init(&chan->tx_q);
3038 
3039 	chan->local_amp_id = 0;
3040 	chan->move_id = 0;
3041 	chan->move_state = L2CAP_MOVE_STABLE;
3042 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
3043 
3044 	if (chan->mode != L2CAP_MODE_ERTM)
3045 		return 0;
3046 
3047 	chan->rx_state = L2CAP_RX_STATE_RECV;
3048 	chan->tx_state = L2CAP_TX_STATE_XMIT;
3049 
3050 	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3051 	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3052 	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3053 
3054 	skb_queue_head_init(&chan->srej_q);
3055 
3056 	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3057 	if (err < 0)
3058 		return err;
3059 
3060 	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3061 	if (err < 0)
3062 		l2cap_seq_list_free(&chan->srej_list);
3063 
3064 	return err;
3065 }
3066 
3067 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3068 {
3069 	switch (mode) {
3070 	case L2CAP_MODE_STREAMING:
3071 	case L2CAP_MODE_ERTM:
3072 		if (l2cap_mode_supported(mode, remote_feat_mask))
3073 			return mode;
3074 		/* fall through */
3075 	default:
3076 		return L2CAP_MODE_BASIC;
3077 	}
3078 }
3079 
3080 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
3081 {
3082 	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3083 }
3084 
3085 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
3086 {
3087 	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3088 }
3089 
3090 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3091 				      struct l2cap_conf_rfc *rfc)
3092 {
3093 	if (chan->local_amp_id && chan->hs_hcon) {
3094 		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3095 
3096 		/* Class 1 devices must have ERTM timeouts
3097 		 * exceeding the Link Supervision Timeout.  The
3098 		 * default Link Supervision Timeout for AMP
3099 		 * controllers is 10 seconds.
3100 		 *
3101 		 * Class 1 devices use 0xffffffff for their
3102 		 * best-effort flush timeout, so the clamping logic
3103 		 * will result in a timeout that meets the above
3104 		 * requirement.  ERTM timeouts are 16-bit values, so
3105 		 * the maximum timeout is 65.535 seconds.
3106 		 */
3107 
3108 		/* Convert timeout to milliseconds and round */
3109 		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3110 
3111 		/* This is the recommended formula for class 2 devices
3112 		 * that start ERTM timers when packets are sent to the
3113 		 * controller.
3114 		 */
3115 		ertm_to = 3 * ertm_to + 500;
3116 
3117 		if (ertm_to > 0xffff)
3118 			ertm_to = 0xffff;
3119 
3120 		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3121 		rfc->monitor_timeout = rfc->retrans_timeout;
3122 	} else {
3123 		rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3124 		rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3125 	}
3126 }
3127 
3128 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3129 {
3130 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3131 	    __l2cap_ews_supported(chan)) {
3132 		/* use extended control field */
3133 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3134 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3135 	} else {
3136 		chan->tx_win = min_t(u16, chan->tx_win,
3137 				     L2CAP_DEFAULT_TX_WINDOW);
3138 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3139 	}
3140 	chan->ack_win = chan->tx_win;
3141 }
3142 
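/* Build our Configuration Request: MTU plus, depending on the selected
 * mode, the RFC, EFS, EWS and FCS options.  Returns the request length.
 */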
3143 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3144 {
3145 	struct l2cap_conf_req *req = data;
3146 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3147 	void *ptr = req->data;
3148 	u16 size;
3149 
3150 	BT_DBG("chan %p", chan);
3151 
3152 	if (chan->num_conf_req || chan->num_conf_rsp)
3153 		goto done;
3154 
3155 	switch (chan->mode) {
3156 	case L2CAP_MODE_STREAMING:
3157 	case L2CAP_MODE_ERTM:
3158 		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3159 			break;
3160 
3161 		if (__l2cap_efs_supported(chan))
3162 			set_bit(FLAG_EFS_ENABLE, &chan->flags);
3163 
3164 		/* fall through */
3165 	default:
3166 		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3167 		break;
3168 	}
3169 
3170 done:
3171 	if (chan->imtu != L2CAP_DEFAULT_MTU)
3172 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3173 
3174 	switch (chan->mode) {
3175 	case L2CAP_MODE_BASIC:
3176 		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3177 		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3178 			break;
3179 
3180 		rfc.mode            = L2CAP_MODE_BASIC;
3181 		rfc.txwin_size      = 0;
3182 		rfc.max_transmit    = 0;
3183 		rfc.retrans_timeout = 0;
3184 		rfc.monitor_timeout = 0;
3185 		rfc.max_pdu_size    = 0;
3186 
3187 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3188 				   (unsigned long) &rfc);
3189 		break;
3190 
3191 	case L2CAP_MODE_ERTM:
3192 		rfc.mode            = L2CAP_MODE_ERTM;
3193 		rfc.max_transmit    = chan->max_tx;
3194 
3195 		__l2cap_set_ertm_timeouts(chan, &rfc);
3196 
3197 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3198 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3199 			     L2CAP_FCS_SIZE);
3200 		rfc.max_pdu_size = cpu_to_le16(size);
3201 
3202 		l2cap_txwin_setup(chan);
3203 
3204 		rfc.txwin_size = min_t(u16, chan->tx_win,
3205 				       L2CAP_DEFAULT_TX_WINDOW);
3206 
3207 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3208 				   (unsigned long) &rfc);
3209 
3210 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3211 			l2cap_add_opt_efs(&ptr, chan);
3212 
3213 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3214 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3215 					   chan->tx_win);
3216 
3217 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3218 			if (chan->fcs == L2CAP_FCS_NONE ||
3219 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3220 				chan->fcs = L2CAP_FCS_NONE;
3221 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3222 						   chan->fcs);
3223 			}
3224 		break;
3225 
3226 	case L2CAP_MODE_STREAMING:
3227 		l2cap_txwin_setup(chan);
3228 		rfc.mode            = L2CAP_MODE_STREAMING;
3229 		rfc.txwin_size      = 0;
3230 		rfc.max_transmit    = 0;
3231 		rfc.retrans_timeout = 0;
3232 		rfc.monitor_timeout = 0;
3233 
3234 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3235 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3236 			     L2CAP_FCS_SIZE);
3237 		rfc.max_pdu_size = cpu_to_le16(size);
3238 
3239 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3240 				   (unsigned long) &rfc);
3241 
3242 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3243 			l2cap_add_opt_efs(&ptr, chan);
3244 
3245 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3246 			if (chan->fcs == L2CAP_FCS_NONE ||
3247 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3248 				chan->fcs = L2CAP_FCS_NONE;
3249 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3250 						   chan->fcs);
3251 			}
3252 		break;
3253 	}
3254 
3255 	req->dcid  = cpu_to_le16(chan->dcid);
3256 	req->flags = __constant_cpu_to_le16(0);
3257 
3258 	return ptr - data;
3259 }
3260 
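/* Parse the accumulated Configuration Request from the peer and build
 * the Configuration Response in data, accepting, adjusting or
 * rejecting each option.  Returns the response length or a negative
 * error if the request cannot be honoured.
 */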
3261 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3262 {
3263 	struct l2cap_conf_rsp *rsp = data;
3264 	void *ptr = rsp->data;
3265 	void *req = chan->conf_req;
3266 	int len = chan->conf_len;
3267 	int type, hint, olen;
3268 	unsigned long val;
3269 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3270 	struct l2cap_conf_efs efs;
3271 	u8 remote_efs = 0;
3272 	u16 mtu = L2CAP_DEFAULT_MTU;
3273 	u16 result = L2CAP_CONF_SUCCESS;
3274 	u16 size;
3275 
3276 	BT_DBG("chan %p", chan);
3277 
3278 	while (len >= L2CAP_CONF_OPT_SIZE) {
3279 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3280 
3281 		hint  = type & L2CAP_CONF_HINT;
3282 		type &= L2CAP_CONF_MASK;
3283 
3284 		switch (type) {
3285 		case L2CAP_CONF_MTU:
3286 			mtu = val;
3287 			break;
3288 
3289 		case L2CAP_CONF_FLUSH_TO:
3290 			chan->flush_to = val;
3291 			break;
3292 
3293 		case L2CAP_CONF_QOS:
3294 			break;
3295 
3296 		case L2CAP_CONF_RFC:
3297 			if (olen == sizeof(rfc))
3298 				memcpy(&rfc, (void *) val, olen);
3299 			break;
3300 
3301 		case L2CAP_CONF_FCS:
3302 			if (val == L2CAP_FCS_NONE)
3303 				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3304 			break;
3305 
3306 		case L2CAP_CONF_EFS:
3307 			remote_efs = 1;
3308 			if (olen == sizeof(efs))
3309 				memcpy(&efs, (void *) val, olen);
3310 			break;
3311 
3312 		case L2CAP_CONF_EWS:
3313 			if (!enable_hs)
3314 				return -ECONNREFUSED;
3315 
3316 			set_bit(FLAG_EXT_CTRL, &chan->flags);
3317 			set_bit(CONF_EWS_RECV, &chan->conf_state);
3318 			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3319 			chan->remote_tx_win = val;
3320 			break;
3321 
3322 		default:
3323 			if (hint)
3324 				break;
3325 
3326 			result = L2CAP_CONF_UNKNOWN;
3327 			*((u8 *) ptr++) = type;
3328 			break;
3329 		}
3330 	}
3331 
3332 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
3333 		goto done;
3334 
3335 	switch (chan->mode) {
3336 	case L2CAP_MODE_STREAMING:
3337 	case L2CAP_MODE_ERTM:
3338 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3339 			chan->mode = l2cap_select_mode(rfc.mode,
3340 						       chan->conn->feat_mask);
3341 			break;
3342 		}
3343 
3344 		if (remote_efs) {
3345 			if (__l2cap_efs_supported(chan))
3346 				set_bit(FLAG_EFS_ENABLE, &chan->flags);
3347 			else
3348 				return -ECONNREFUSED;
3349 		}
3350 
3351 		if (chan->mode != rfc.mode)
3352 			return -ECONNREFUSED;
3353 
3354 		break;
3355 	}
3356 
3357 done:
3358 	if (chan->mode != rfc.mode) {
3359 		result = L2CAP_CONF_UNACCEPT;
3360 		rfc.mode = chan->mode;
3361 
3362 		if (chan->num_conf_rsp == 1)
3363 			return -ECONNREFUSED;
3364 
3365 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3366 				   (unsigned long) &rfc);
3367 	}
3368 
3369 	if (result == L2CAP_CONF_SUCCESS) {
3370 		/* Configure output options and let the other side know
3371 		 * which ones we don't like. */
3372 
3373 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
3374 			result = L2CAP_CONF_UNACCEPT;
3375 		else {
3376 			chan->omtu = mtu;
3377 			set_bit(CONF_MTU_DONE, &chan->conf_state);
3378 		}
3379 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3380 
3381 		if (remote_efs) {
3382 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3383 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3384 			    efs.stype != chan->local_stype) {
3385 
3386 				result = L2CAP_CONF_UNACCEPT;
3387 
3388 				if (chan->num_conf_req >= 1)
3389 					return -ECONNREFUSED;
3390 
3391 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3392 						   sizeof(efs),
3393 						   (unsigned long) &efs);
3394 			} else {
3395 				/* Send PENDING Conf Rsp */
3396 				result = L2CAP_CONF_PENDING;
3397 				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3398 			}
3399 		}
3400 
3401 		switch (rfc.mode) {
3402 		case L2CAP_MODE_BASIC:
3403 			chan->fcs = L2CAP_FCS_NONE;
3404 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3405 			break;
3406 
3407 		case L2CAP_MODE_ERTM:
3408 			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3409 				chan->remote_tx_win = rfc.txwin_size;
3410 			else
3411 				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3412 
3413 			chan->remote_max_tx = rfc.max_transmit;
3414 
3415 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3416 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3417 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3418 			rfc.max_pdu_size = cpu_to_le16(size);
3419 			chan->remote_mps = size;
3420 
3421 			__l2cap_set_ertm_timeouts(chan, &rfc);
3422 
3423 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3424 
3425 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3426 					   sizeof(rfc), (unsigned long) &rfc);
3427 
3428 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3429 				chan->remote_id = efs.id;
3430 				chan->remote_stype = efs.stype;
3431 				chan->remote_msdu = le16_to_cpu(efs.msdu);
3432 				chan->remote_flush_to =
3433 					le32_to_cpu(efs.flush_to);
3434 				chan->remote_acc_lat =
3435 					le32_to_cpu(efs.acc_lat);
3436 				chan->remote_sdu_itime =
3437 					le32_to_cpu(efs.sdu_itime);
3438 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3439 						   sizeof(efs),
3440 						   (unsigned long) &efs);
3441 			}
3442 			break;
3443 
3444 		case L2CAP_MODE_STREAMING:
3445 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3446 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3447 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3448 			rfc.max_pdu_size = cpu_to_le16(size);
3449 			chan->remote_mps = size;
3450 
3451 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3452 
3453 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3454 					   (unsigned long) &rfc);
3455 
3456 			break;
3457 
3458 		default:
3459 			result = L2CAP_CONF_UNACCEPT;
3460 
3461 			memset(&rfc, 0, sizeof(rfc));
3462 			rfc.mode = chan->mode;
3463 		}
3464 
3465 		if (result == L2CAP_CONF_SUCCESS)
3466 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3467 	}
3468 	rsp->scid   = cpu_to_le16(chan->dcid);
3469 	rsp->result = cpu_to_le16(result);
3470 	rsp->flags  = __constant_cpu_to_le16(0);
3471 
3472 	return ptr - data;
3473 }
3474 
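/* Parse the peer's Configuration Response and build an adjusted
 * Configuration Request in data, applying any negotiated ERTM or
 * streaming parameters.  Returns the new request length.
 */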
3475 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3476 				void *data, u16 *result)
3477 {
3478 	struct l2cap_conf_req *req = data;
3479 	void *ptr = req->data;
3480 	int type, olen;
3481 	unsigned long val;
3482 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3483 	struct l2cap_conf_efs efs;
3484 
3485 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3486 
3487 	while (len >= L2CAP_CONF_OPT_SIZE) {
3488 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3489 
3490 		switch (type) {
3491 		case L2CAP_CONF_MTU:
3492 			if (val < L2CAP_DEFAULT_MIN_MTU) {
3493 				*result = L2CAP_CONF_UNACCEPT;
3494 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3495 			} else
3496 				chan->imtu = val;
3497 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3498 			break;
3499 
3500 		case L2CAP_CONF_FLUSH_TO:
3501 			chan->flush_to = val;
3502 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3503 					   2, chan->flush_to);
3504 			break;
3505 
3506 		case L2CAP_CONF_RFC:
3507 			if (olen == sizeof(rfc))
3508 				memcpy(&rfc, (void *)val, olen);
3509 
3510 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3511 			    rfc.mode != chan->mode)
3512 				return -ECONNREFUSED;
3513 
3514 			chan->fcs = 0;
3515 
3516 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3517 					   sizeof(rfc), (unsigned long) &rfc);
3518 			break;
3519 
3520 		case L2CAP_CONF_EWS:
3521 			chan->ack_win = min_t(u16, val, chan->ack_win);
3522 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3523 					   chan->tx_win);
3524 			break;
3525 
3526 		case L2CAP_CONF_EFS:
3527 			if (olen == sizeof(efs))
3528 				memcpy(&efs, (void *)val, olen);
3529 
3530 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3531 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3532 			    efs.stype != chan->local_stype)
3533 				return -ECONNREFUSED;
3534 
3535 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3536 					   (unsigned long) &efs);
3537 			break;
3538 
3539 		case L2CAP_CONF_FCS:
3540 			if (*result == L2CAP_CONF_PENDING)
3541 				if (val == L2CAP_FCS_NONE)
3542 					set_bit(CONF_RECV_NO_FCS,
3543 						&chan->conf_state);
3544 			break;
3545 		}
3546 	}
3547 
3548 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3549 		return -ECONNREFUSED;
3550 
3551 	chan->mode = rfc.mode;
3552 
3553 	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3554 		switch (rfc.mode) {
3555 		case L2CAP_MODE_ERTM:
3556 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3557 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3558 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3559 			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3560 				chan->ack_win = min_t(u16, chan->ack_win,
3561 						      rfc.txwin_size);
3562 
3563 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3564 				chan->local_msdu = le16_to_cpu(efs.msdu);
3565 				chan->local_sdu_itime =
3566 					le32_to_cpu(efs.sdu_itime);
3567 				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3568 				chan->local_flush_to =
3569 					le32_to_cpu(efs.flush_to);
3570 			}
3571 			break;
3572 
3573 		case L2CAP_MODE_STREAMING:
3574 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3575 		}
3576 	}
3577 
3578 	req->dcid   = cpu_to_le16(chan->dcid);
3579 	req->flags  = __constant_cpu_to_le16(0);
3580 
3581 	return ptr - data;
3582 }
3583 
3584 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3585 				u16 result, u16 flags)
3586 {
3587 	struct l2cap_conf_rsp *rsp = data;
3588 	void *ptr = rsp->data;
3589 
3590 	BT_DBG("chan %p", chan);
3591 
3592 	rsp->scid   = cpu_to_le16(chan->dcid);
3593 	rsp->result = cpu_to_le16(result);
3594 	rsp->flags  = cpu_to_le16(flags);
3595 
3596 	return ptr - data;
3597 }
3598 
3599 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3600 {
3601 	struct l2cap_conn_rsp rsp;
3602 	struct l2cap_conn *conn = chan->conn;
3603 	u8 buf[128];
3604 	u8 rsp_code;
3605 
3606 	rsp.scid   = cpu_to_le16(chan->dcid);
3607 	rsp.dcid   = cpu_to_le16(chan->scid);
3608 	rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3609 	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3610 
3611 	if (chan->hs_hcon)
3612 		rsp_code = L2CAP_CREATE_CHAN_RSP;
3613 	else
3614 		rsp_code = L2CAP_CONN_RSP;
3615 
3616 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3617 
3618 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3619 
3620 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3621 		return;
3622 
3623 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3624 		       l2cap_build_conf_req(chan, buf), buf);
3625 	chan->num_conf_req++;
3626 }
3627 
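/* Extract the final RFC (and extended window) parameters from a
 * successful Configuration Response and apply them to the channel.
 */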
3628 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3629 {
3630 	int type, olen;
3631 	unsigned long val;
3632 	/* Use sane default values in case a misbehaving remote device
3633 	 * did not send an RFC or extended window size option.
3634 	 */
3635 	u16 txwin_ext = chan->ack_win;
3636 	struct l2cap_conf_rfc rfc = {
3637 		.mode = chan->mode,
3638 		.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3639 		.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3640 		.max_pdu_size = cpu_to_le16(chan->imtu),
3641 		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3642 	};
3643 
3644 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3645 
3646 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3647 		return;
3648 
3649 	while (len >= L2CAP_CONF_OPT_SIZE) {
3650 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3651 
3652 		switch (type) {
3653 		case L2CAP_CONF_RFC:
3654 			if (olen == sizeof(rfc))
3655 				memcpy(&rfc, (void *)val, olen);
3656 			break;
3657 		case L2CAP_CONF_EWS:
3658 			txwin_ext = val;
3659 			break;
3660 		}
3661 	}
3662 
3663 	switch (rfc.mode) {
3664 	case L2CAP_MODE_ERTM:
3665 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3666 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3667 		chan->mps = le16_to_cpu(rfc.max_pdu_size);
3668 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3669 			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3670 		else
3671 			chan->ack_win = min_t(u16, chan->ack_win,
3672 					      rfc.txwin_size);
3673 		break;
3674 	case L2CAP_MODE_STREAMING:
3675 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3676 	}
3677 }
3678 
3679 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3680 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3681 				    u8 *data)
3682 {
3683 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3684 
3685 	if (cmd_len < sizeof(*rej))
3686 		return -EPROTO;
3687 
3688 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3689 		return 0;
3690 
3691 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3692 	    cmd->ident == conn->info_ident) {
3693 		cancel_delayed_work(&conn->info_timer);
3694 
3695 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3696 		conn->info_ident = 0;
3697 
3698 		l2cap_conn_start(conn);
3699 	}
3700 
3701 	return 0;
3702 }
3703 
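/* Handle an incoming Connection Request: locate the listening channel
 * for the PSM, create and add a new channel, and reply with success,
 * pending or an error result.
 */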
3704 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3705 					struct l2cap_cmd_hdr *cmd,
3706 					u8 *data, u8 rsp_code, u8 amp_id)
3707 {
3708 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3709 	struct l2cap_conn_rsp rsp;
3710 	struct l2cap_chan *chan = NULL, *pchan;
3711 	struct sock *parent, *sk = NULL;
3712 	int result, status = L2CAP_CS_NO_INFO;
3713 
3714 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3715 	__le16 psm = req->psm;
3716 
3717 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3718 
3719 	/* Check if we have a socket listening on this PSM */
3720 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3721 	if (!pchan) {
3722 		result = L2CAP_CR_BAD_PSM;
3723 		goto sendresp;
3724 	}
3725 
3726 	parent = pchan->sk;
3727 
3728 	mutex_lock(&conn->chan_lock);
3729 	lock_sock(parent);
3730 
3731 	/* Check if the ACL is secure enough (if not SDP) */
3732 	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3733 	    !hci_conn_check_link_mode(conn->hcon)) {
3734 		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3735 		result = L2CAP_CR_SEC_BLOCK;
3736 		goto response;
3737 	}
3738 
3739 	result = L2CAP_CR_NO_MEM;
3740 
3741 	/* Check if we already have a channel with that dcid */
3742 	if (__l2cap_get_chan_by_dcid(conn, scid))
3743 		goto response;
3744 
3745 	chan = pchan->ops->new_connection(pchan);
3746 	if (!chan)
3747 		goto response;
3748 
3749 	sk = chan->sk;
3750 
3751 	hci_conn_hold(conn->hcon);
3752 
3753 	bacpy(&bt_sk(sk)->src, conn->src);
3754 	bacpy(&bt_sk(sk)->dst, conn->dst);
3755 	chan->psm  = psm;
3756 	chan->dcid = scid;
3757 	chan->local_amp_id = amp_id;
3758 
3759 	__l2cap_chan_add(conn, chan);
3760 
3761 	dcid = chan->scid;
3762 
3763 	__set_chan_timer(chan, sk->sk_sndtimeo);
3764 
3765 	chan->ident = cmd->ident;
3766 
3767 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3768 		if (l2cap_chan_check_security(chan)) {
3769 			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3770 				__l2cap_state_change(chan, BT_CONNECT2);
3771 				result = L2CAP_CR_PEND;
3772 				status = L2CAP_CS_AUTHOR_PEND;
3773 				chan->ops->defer(chan);
3774 			} else {
3775 				/* Force pending result for AMP controllers.
3776 				 * The connection will succeed after the
3777 				 * physical link is up.
3778 				 */
3779 				if (amp_id) {
3780 					__l2cap_state_change(chan, BT_CONNECT2);
3781 					result = L2CAP_CR_PEND;
3782 				} else {
3783 					__l2cap_state_change(chan, BT_CONFIG);
3784 					result = L2CAP_CR_SUCCESS;
3785 				}
3786 				status = L2CAP_CS_NO_INFO;
3787 			}
3788 		} else {
3789 			__l2cap_state_change(chan, BT_CONNECT2);
3790 			result = L2CAP_CR_PEND;
3791 			status = L2CAP_CS_AUTHEN_PEND;
3792 		}
3793 	} else {
3794 		__l2cap_state_change(chan, BT_CONNECT2);
3795 		result = L2CAP_CR_PEND;
3796 		status = L2CAP_CS_NO_INFO;
3797 	}
3798 
3799 response:
3800 	release_sock(parent);
3801 	mutex_unlock(&conn->chan_lock);
3802 
3803 sendresp:
3804 	rsp.scid   = cpu_to_le16(scid);
3805 	rsp.dcid   = cpu_to_le16(dcid);
3806 	rsp.result = cpu_to_le16(result);
3807 	rsp.status = cpu_to_le16(status);
3808 	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3809 
3810 	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3811 		struct l2cap_info_req info;
3812 		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3813 
3814 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3815 		conn->info_ident = l2cap_get_ident(conn);
3816 
3817 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3818 
3819 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3820 			       sizeof(info), &info);
3821 	}
3822 
3823 	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3824 	    result == L2CAP_CR_SUCCESS) {
3825 		u8 buf[128];
3826 		set_bit(CONF_REQ_SENT, &chan->conf_state);
3827 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3828 			       l2cap_build_conf_req(chan, buf), buf);
3829 		chan->num_conf_req++;
3830 	}
3831 
3832 	return chan;
3833 }
3834 
3835 static int l2cap_connect_req(struct l2cap_conn *conn,
3836 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3837 {
3838 	struct hci_dev *hdev = conn->hcon->hdev;
3839 	struct hci_conn *hcon = conn->hcon;
3840 
3841 	if (cmd_len < sizeof(struct l2cap_conn_req))
3842 		return -EPROTO;
3843 
3844 	hci_dev_lock(hdev);
3845 	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3846 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3847 		mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3848 				      hcon->dst_type, 0, NULL, 0,
3849 				      hcon->dev_class);
3850 	hci_dev_unlock(hdev);
3851 
3852 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3853 	return 0;
3854 }
3855 
3856 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3857 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3858 				    u8 *data)
3859 {
3860 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3861 	u16 scid, dcid, result, status;
3862 	struct l2cap_chan *chan;
3863 	u8 req[128];
3864 	int err;
3865 
3866 	if (cmd_len < sizeof(*rsp))
3867 		return -EPROTO;
3868 
3869 	scid   = __le16_to_cpu(rsp->scid);
3870 	dcid   = __le16_to_cpu(rsp->dcid);
3871 	result = __le16_to_cpu(rsp->result);
3872 	status = __le16_to_cpu(rsp->status);
3873 
3874 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3875 	       dcid, scid, result, status);
3876 
3877 	mutex_lock(&conn->chan_lock);
3878 
3879 	if (scid) {
3880 		chan = __l2cap_get_chan_by_scid(conn, scid);
3881 		if (!chan) {
3882 			err = -EFAULT;
3883 			goto unlock;
3884 		}
3885 	} else {
3886 		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3887 		if (!chan) {
3888 			err = -EFAULT;
3889 			goto unlock;
3890 		}
3891 	}
3892 
3893 	err = 0;
3894 
3895 	l2cap_chan_lock(chan);
3896 
3897 	switch (result) {
3898 	case L2CAP_CR_SUCCESS:
3899 		l2cap_state_change(chan, BT_CONFIG);
3900 		chan->ident = 0;
3901 		chan->dcid = dcid;
3902 		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3903 
3904 		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3905 			break;
3906 
3907 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3908 			       l2cap_build_conf_req(chan, req), req);
3909 		chan->num_conf_req++;
3910 		break;
3911 
3912 	case L2CAP_CR_PEND:
3913 		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3914 		break;
3915 
3916 	default:
3917 		l2cap_chan_del(chan, ECONNREFUSED);
3918 		break;
3919 	}
3920 
3921 	l2cap_chan_unlock(chan);
3922 
3923 unlock:
3924 	mutex_unlock(&conn->chan_lock);
3925 
3926 	return err;
3927 }
3928 
3929 static inline void set_default_fcs(struct l2cap_chan *chan)
3930 {
3931 	/* FCS is enabled only in ERTM or streaming mode, if one or both
3932 	 * sides request it.
3933 	 */
3934 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3935 		chan->fcs = L2CAP_FCS_NONE;
3936 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3937 		chan->fcs = L2CAP_FCS_CRC16;
3938 }
3939 
3940 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3941 				    u8 ident, u16 flags)
3942 {
3943 	struct l2cap_conn *conn = chan->conn;
3944 
3945 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3946 	       flags);
3947 
3948 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3949 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3950 
3951 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3952 		       l2cap_build_conf_rsp(chan, data,
3953 					    L2CAP_CONF_SUCCESS, flags), data);
3954 }
3955 
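/* Handle an incoming Configuration Request.  Option data is
 * accumulated across continuation packets, parsed once complete, and
 * answered; the channel is brought up when both directions have
 * finished configuration.
 */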
3956 static inline int l2cap_config_req(struct l2cap_conn *conn,
3957 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3958 				   u8 *data)
3959 {
3960 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3961 	u16 dcid, flags;
3962 	u8 rsp[64];
3963 	struct l2cap_chan *chan;
3964 	int len, err = 0;
3965 
3966 	if (cmd_len < sizeof(*req))
3967 		return -EPROTO;
3968 
3969 	dcid  = __le16_to_cpu(req->dcid);
3970 	flags = __le16_to_cpu(req->flags);
3971 
3972 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3973 
3974 	chan = l2cap_get_chan_by_scid(conn, dcid);
3975 	if (!chan)
3976 		return -ENOENT;
3977 
3978 	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3979 		struct l2cap_cmd_rej_cid rej;
3980 
3981 		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3982 		rej.scid = cpu_to_le16(chan->scid);
3983 		rej.dcid = cpu_to_le16(chan->dcid);
3984 
3985 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3986 			       sizeof(rej), &rej);
3987 		goto unlock;
3988 	}
3989 
3990 	/* Reject if config buffer is too small. */
3991 	len = cmd_len - sizeof(*req);
3992 	if (chan->conf_len + len > sizeof(chan->conf_req)) {
3993 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3994 			       l2cap_build_conf_rsp(chan, rsp,
3995 			       L2CAP_CONF_REJECT, flags), rsp);
3996 		goto unlock;
3997 	}
3998 
3999 	/* Store config. */
4000 	memcpy(chan->conf_req + chan->conf_len, req->data, len);
4001 	chan->conf_len += len;
4002 
4003 	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4004 		/* Incomplete config. Send empty response. */
4005 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4006 			       l2cap_build_conf_rsp(chan, rsp,
4007 			       L2CAP_CONF_SUCCESS, flags), rsp);
4008 		goto unlock;
4009 	}
4010 
4011 	/* Complete config. */
4012 	len = l2cap_parse_conf_req(chan, rsp);
4013 	if (len < 0) {
4014 		l2cap_send_disconn_req(chan, ECONNRESET);
4015 		goto unlock;
4016 	}
4017 
4018 	chan->ident = cmd->ident;
4019 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4020 	chan->num_conf_rsp++;
4021 
4022 	/* Reset config buffer. */
4023 	chan->conf_len = 0;
4024 
4025 	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4026 		goto unlock;
4027 
4028 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4029 		set_default_fcs(chan);
4030 
4031 		if (chan->mode == L2CAP_MODE_ERTM ||
4032 		    chan->mode == L2CAP_MODE_STREAMING)
4033 			err = l2cap_ertm_init(chan);
4034 
4035 		if (err < 0)
4036 			l2cap_send_disconn_req(chan, -err);
4037 		else
4038 			l2cap_chan_ready(chan);
4039 
4040 		goto unlock;
4041 	}
4042 
4043 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4044 		u8 buf[64];
4045 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4046 			       l2cap_build_conf_req(chan, buf), buf);
4047 		chan->num_conf_req++;
4048 	}
4049 
4050 	/* Got Conf Rsp PENDING from remote side and assume we sent
4051 	 * Conf Rsp PENDING in the code above. */
4052 	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4053 	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4054 
4055 		/* check compatibility */
4056 
4057 		/* Send rsp for BR/EDR channel */
4058 		if (!chan->hs_hcon)
4059 			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4060 		else
4061 			chan->ident = cmd->ident;
4062 	}
4063 
4064 unlock:
4065 	l2cap_chan_unlock(chan);
4066 	return err;
4067 }
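/* Configuration requests may span several signaling PDUs when the options do
 * not fit in one command: every fragment except the last sets
 * L2CAP_CONF_FLAG_CONTINUATION, and the handler above accumulates the option
 * data in chan->conf_req until the final fragment arrives.  Rough wire layout
 * of a single request, as implied by struct l2cap_conf_req used above
 * (illustrative):
 *
 *   +-------------+--------------+-----------------------------------------+
 *   | dcid (le16) | flags (le16) | config options (MTU, RFC, FCS, EFS, ...) |
 *   +-------------+--------------+-----------------------------------------+
 */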
4068 
4069 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4070 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4071 				   u8 *data)
4072 {
4073 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4074 	u16 scid, flags, result;
4075 	struct l2cap_chan *chan;
4076 	int len = cmd_len - sizeof(*rsp);
4077 	int err = 0;
4078 
4079 	if (cmd_len < sizeof(*rsp))
4080 		return -EPROTO;
4081 
4082 	scid   = __le16_to_cpu(rsp->scid);
4083 	flags  = __le16_to_cpu(rsp->flags);
4084 	result = __le16_to_cpu(rsp->result);
4085 
4086 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4087 	       result, len);
4088 
4089 	chan = l2cap_get_chan_by_scid(conn, scid);
4090 	if (!chan)
4091 		return 0;
4092 
4093 	switch (result) {
4094 	case L2CAP_CONF_SUCCESS:
4095 		l2cap_conf_rfc_get(chan, rsp->data, len);
4096 		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4097 		break;
4098 
4099 	case L2CAP_CONF_PENDING:
4100 		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4101 
4102 		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4103 			char buf[64];
4104 
4105 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4106 						   buf, &result);
4107 			if (len < 0) {
4108 				l2cap_send_disconn_req(chan, ECONNRESET);
4109 				goto done;
4110 			}
4111 
4112 			if (!chan->hs_hcon) {
4113 				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4114 							0);
4115 			} else {
4116 				if (l2cap_check_efs(chan)) {
4117 					amp_create_logical_link(chan);
4118 					chan->ident = cmd->ident;
4119 				}
4120 			}
4121 		}
4122 		goto done;
4123 
4124 	case L2CAP_CONF_UNACCEPT:
4125 		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4126 			char req[64];
4127 
4128 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4129 				l2cap_send_disconn_req(chan, ECONNRESET);
4130 				goto done;
4131 			}
4132 
4133 			/* throw out any old stored conf requests */
4134 			result = L2CAP_CONF_SUCCESS;
4135 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4136 						   req, &result);
4137 			if (len < 0) {
4138 				l2cap_send_disconn_req(chan, ECONNRESET);
4139 				goto done;
4140 			}
4141 
4142 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
4143 				       L2CAP_CONF_REQ, len, req);
4144 			chan->num_conf_req++;
4145 			if (result != L2CAP_CONF_SUCCESS)
4146 				goto done;
4147 			break;
4148 		}
4149 
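		/* fall through - negotiation limit reached, give up */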
4150 	default:
4151 		l2cap_chan_set_err(chan, ECONNRESET);
4152 
4153 		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4154 		l2cap_send_disconn_req(chan, ECONNRESET);
4155 		goto done;
4156 	}
4157 
4158 	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4159 		goto done;
4160 
4161 	set_bit(CONF_INPUT_DONE, &chan->conf_state);
4162 
4163 	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4164 		set_default_fcs(chan);
4165 
4166 		if (chan->mode == L2CAP_MODE_ERTM ||
4167 		    chan->mode == L2CAP_MODE_STREAMING)
4168 			err = l2cap_ertm_init(chan);
4169 
4170 		if (err < 0)
4171 			l2cap_send_disconn_req(chan, -err);
4172 		else
4173 			l2cap_chan_ready(chan);
4174 	}
4175 
4176 done:
4177 	l2cap_chan_unlock(chan);
4178 	return err;
4179 }
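/* Note on completion: configuration runs in both directions.  CONF_OUTPUT_DONE
 * means our (final) response to the peer's request has been sent, and
 * CONF_INPUT_DONE means the peer has accepted our request; only the handler
 * that observes both bits set initializes ERTM (when applicable) and calls
 * l2cap_chan_ready().
 */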
4180 
4181 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4182 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4183 				       u8 *data)
4184 {
4185 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4186 	struct l2cap_disconn_rsp rsp;
4187 	u16 dcid, scid;
4188 	struct l2cap_chan *chan;
4189 	struct sock *sk;
4190 
4191 	if (cmd_len != sizeof(*req))
4192 		return -EPROTO;
4193 
4194 	scid = __le16_to_cpu(req->scid);
4195 	dcid = __le16_to_cpu(req->dcid);
4196 
4197 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4198 
4199 	mutex_lock(&conn->chan_lock);
4200 
4201 	chan = __l2cap_get_chan_by_scid(conn, dcid);
4202 	if (!chan) {
4203 		mutex_unlock(&conn->chan_lock);
4204 		return 0;
4205 	}
4206 
4207 	l2cap_chan_lock(chan);
4208 
4209 	sk = chan->sk;
4210 
4211 	rsp.dcid = cpu_to_le16(chan->scid);
4212 	rsp.scid = cpu_to_le16(chan->dcid);
4213 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4214 
4215 	lock_sock(sk);
4216 	sk->sk_shutdown = SHUTDOWN_MASK;
4217 	release_sock(sk);
4218 
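	/* Take an extra reference so the channel stays valid across
	 * l2cap_chan_del() and the ops->close() callback until the final
	 * l2cap_chan_put() below.
	 */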
4219 	l2cap_chan_hold(chan);
4220 	l2cap_chan_del(chan, ECONNRESET);
4221 
4222 	l2cap_chan_unlock(chan);
4223 
4224 	chan->ops->close(chan);
4225 	l2cap_chan_put(chan);
4226 
4227 	mutex_unlock(&conn->chan_lock);
4228 
4229 	return 0;
4230 }
4231 
4232 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4233 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4234 				       u8 *data)
4235 {
4236 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4237 	u16 dcid, scid;
4238 	struct l2cap_chan *chan;
4239 
4240 	if (cmd_len != sizeof(*rsp))
4241 		return -EPROTO;
4242 
4243 	scid = __le16_to_cpu(rsp->scid);
4244 	dcid = __le16_to_cpu(rsp->dcid);
4245 
4246 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4247 
4248 	mutex_lock(&conn->chan_lock);
4249 
4250 	chan = __l2cap_get_chan_by_scid(conn, scid);
4251 	if (!chan) {
4252 		mutex_unlock(&conn->chan_lock);
4253 		return 0;
4254 	}
4255 
4256 	l2cap_chan_lock(chan);
4257 
4258 	l2cap_chan_hold(chan);
4259 	l2cap_chan_del(chan, 0);
4260 
4261 	l2cap_chan_unlock(chan);
4262 
4263 	chan->ops->close(chan);
4264 	l2cap_chan_put(chan);
4265 
4266 	mutex_unlock(&conn->chan_lock);
4267 
4268 	return 0;
4269 }
4270 
4271 static inline int l2cap_information_req(struct l2cap_conn *conn,
4272 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4273 					u8 *data)
4274 {
4275 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4276 	u16 type;
4277 
4278 	if (cmd_len != sizeof(*req))
4279 		return -EPROTO;
4280 
4281 	type = __le16_to_cpu(req->type);
4282 
4283 	BT_DBG("type 0x%4.4x", type);
4284 
4285 	if (type == L2CAP_IT_FEAT_MASK) {
4286 		u8 buf[8];
4287 		u32 feat_mask = l2cap_feat_mask;
4288 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4289 		rsp->type   = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4290 		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4291 		if (!disable_ertm)
4292 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4293 				| L2CAP_FEAT_FCS;
4294 		if (enable_hs)
4295 			feat_mask |= L2CAP_FEAT_EXT_FLOW
4296 				| L2CAP_FEAT_EXT_WINDOW;
4297 
4298 		put_unaligned_le32(feat_mask, rsp->data);
4299 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4300 			       buf);
4301 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4302 		u8 buf[12];
4303 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4304 
4305 		if (enable_hs)
4306 			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4307 		else
4308 			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4309 
4310 		rsp->type   = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4311 		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4312 		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4313 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4314 			       buf);
4315 	} else {
4316 		struct l2cap_info_rsp rsp;
4317 		rsp.type   = cpu_to_le16(type);
4318 		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4319 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4320 			       &rsp);
4321 	}
4322 
4323 	return 0;
4324 }
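/* The information response built above takes one of three shapes depending on
 * the requested type: a 4-byte feature mask for L2CAP_IT_FEAT_MASK, the 8-byte
 * fixed channel bitmap for L2CAP_IT_FIXED_CHAN, or a bare type/result header
 * with L2CAP_IR_NOTSUPP for anything else.  ERTM, streaming and FCS support
 * are only advertised while ERTM is not disabled, and the A2MP fixed channel
 * plus the extended flow spec/window bits only when high speed (enable_hs) is
 * enabled.
 */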
4325 
4326 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4327 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4328 					u8 *data)
4329 {
4330 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4331 	u16 type, result;
4332 
4333 	if (cmd_len != sizeof(*rsp))
4334 		return -EPROTO;
4335 
4336 	type   = __le16_to_cpu(rsp->type);
4337 	result = __le16_to_cpu(rsp->result);
4338 
4339 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4340 
4341 	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
4342 	if (cmd->ident != conn->info_ident ||
4343 	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4344 		return 0;
4345 
4346 	cancel_delayed_work(&conn->info_timer);
4347 
4348 	if (result != L2CAP_IR_SUCCESS) {
4349 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4350 		conn->info_ident = 0;
4351 
4352 		l2cap_conn_start(conn);
4353 
4354 		return 0;
4355 	}
4356 
4357 	switch (type) {
4358 	case L2CAP_IT_FEAT_MASK:
4359 		conn->feat_mask = get_unaligned_le32(rsp->data);
4360 
4361 		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4362 			struct l2cap_info_req req;
4363 			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4364 
4365 			conn->info_ident = l2cap_get_ident(conn);
4366 
4367 			l2cap_send_cmd(conn, conn->info_ident,
4368 				       L2CAP_INFO_REQ, sizeof(req), &req);
4369 		} else {
4370 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4371 			conn->info_ident = 0;
4372 
4373 			l2cap_conn_start(conn);
4374 		}
4375 		break;
4376 
4377 	case L2CAP_IT_FIXED_CHAN:
4378 		conn->fixed_chan_mask = rsp->data[0];
4379 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4380 		conn->info_ident = 0;
4381 
4382 		l2cap_conn_start(conn);
4383 		break;
4384 	}
4385 
4386 	return 0;
4387 }
4388 
4389 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4390 				    struct l2cap_cmd_hdr *cmd,
4391 				    u16 cmd_len, void *data)
4392 {
4393 	struct l2cap_create_chan_req *req = data;
4394 	struct l2cap_create_chan_rsp rsp;
4395 	struct l2cap_chan *chan;
4396 	struct hci_dev *hdev;
4397 	u16 psm, scid;
4398 
4399 	if (cmd_len != sizeof(*req))
4400 		return -EPROTO;
4401 
4402 	if (!enable_hs)
4403 		return -EINVAL;
4404 
4405 	psm = le16_to_cpu(req->psm);
4406 	scid = le16_to_cpu(req->scid);
4407 
4408 	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4409 
4410 	/* For controller id 0 make BR/EDR connection */
4411 	if (req->amp_id == HCI_BREDR_ID) {
4412 		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4413 			      req->amp_id);
4414 		return 0;
4415 	}
4416 
4417 	/* Validate AMP controller id */
4418 	hdev = hci_dev_get(req->amp_id);
4419 	if (!hdev)
4420 		goto error;
4421 
4422 	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4423 		hci_dev_put(hdev);
4424 		goto error;
4425 	}
4426 
4427 	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4428 			     req->amp_id);
4429 	if (chan) {
4430 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
4431 		struct hci_conn *hs_hcon;
4432 
4433 		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, conn->dst);
4434 		if (!hs_hcon) {
4435 			hci_dev_put(hdev);
4436 			return -EFAULT;
4437 		}
4438 
4439 		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4440 
4441 		mgr->bredr_chan = chan;
4442 		chan->hs_hcon = hs_hcon;
4443 		chan->fcs = L2CAP_FCS_NONE;
4444 		conn->mtu = hdev->block_mtu;
4445 	}
4446 
4447 	hci_dev_put(hdev);
4448 
4449 	return 0;
4450 
4451 error:
4452 	rsp.dcid = 0;
4453 	rsp.scid = cpu_to_le16(scid);
4454 	rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4455 	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4456 
4457 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4458 		       sizeof(rsp), &rsp);
4459 
4460 	return -EFAULT;
4461 }
4462 
4463 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4464 {
4465 	struct l2cap_move_chan_req req;
4466 	u8 ident;
4467 
4468 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4469 
4470 	ident = l2cap_get_ident(chan->conn);
4471 	chan->ident = ident;
4472 
4473 	req.icid = cpu_to_le16(chan->scid);
4474 	req.dest_amp_id = dest_amp_id;
4475 
4476 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4477 		       &req);
4478 
4479 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4480 }
4481 
4482 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4483 {
4484 	struct l2cap_move_chan_rsp rsp;
4485 
4486 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4487 
4488 	rsp.icid = cpu_to_le16(chan->dcid);
4489 	rsp.result = cpu_to_le16(result);
4490 
4491 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4492 		       sizeof(rsp), &rsp);
4493 }
4494 
4495 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4496 {
4497 	struct l2cap_move_chan_cfm cfm;
4498 
4499 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4500 
4501 	chan->ident = l2cap_get_ident(chan->conn);
4502 
4503 	cfm.icid = cpu_to_le16(chan->scid);
4504 	cfm.result = cpu_to_le16(result);
4505 
4506 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4507 		       sizeof(cfm), &cfm);
4508 
4509 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4510 }
4511 
4512 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4513 {
4514 	struct l2cap_move_chan_cfm cfm;
4515 
4516 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4517 
4518 	cfm.icid = cpu_to_le16(icid);
4519 	cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4520 
4521 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4522 		       sizeof(cfm), &cfm);
4523 }
4524 
4525 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4526 					 u16 icid)
4527 {
4528 	struct l2cap_move_chan_cfm_rsp rsp;
4529 
4530 	BT_DBG("icid 0x%4.4x", icid);
4531 
4532 	rsp.icid = cpu_to_le16(icid);
4533 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4534 }
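/* The helpers above emit the four PDUs that make up the AMP channel move
 * signaling exchange.  In the straightforward case the two sides trade, in
 * order:
 *
 *   initiator                        responder
 *   L2CAP_MOVE_CHAN_REQ     ----->
 *                           <-----   L2CAP_MOVE_CHAN_RSP (pending or success)
 *   L2CAP_MOVE_CHAN_CFM     ----->
 *                           <-----   L2CAP_MOVE_CHAN_CFM_RSP
 *
 * The request and confirm senders arm L2CAP_MOVE_TIMEOUT so an unresponsive
 * peer cannot stall the move state machine indefinitely.
 */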
4535 
4536 static void __release_logical_link(struct l2cap_chan *chan)
4537 {
4538 	chan->hs_hchan = NULL;
4539 	chan->hs_hcon = NULL;
4540 
4541 	/* Placeholder - release the logical link */
4542 }
4543 
4544 static void l2cap_logical_fail(struct l2cap_chan *chan)
4545 {
4546 	/* Logical link setup failed */
4547 	if (chan->state != BT_CONNECTED) {
4548 		/* Create channel failure, disconnect */
4549 		l2cap_send_disconn_req(chan, ECONNRESET);
4550 		return;
4551 	}
4552 
4553 	switch (chan->move_role) {
4554 	case L2CAP_MOVE_ROLE_RESPONDER:
4555 		l2cap_move_done(chan);
4556 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4557 		break;
4558 	case L2CAP_MOVE_ROLE_INITIATOR:
4559 		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4560 		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4561 			/* Remote has only sent pending or
4562 			 * success responses, clean up
4563 			 */
4564 			l2cap_move_done(chan);
4565 		}
4566 
4567 		/* Other amp move states imply that the move
4568 		 * has already aborted
4569 		 */
4570 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4571 		break;
4572 	}
4573 }
4574 
4575 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4576 					struct hci_chan *hchan)
4577 {
4578 	struct l2cap_conf_rsp rsp;
4579 
4580 	chan->hs_hchan = hchan;
4581 	chan->hs_hcon->l2cap_data = chan->conn;
4582 
4583 	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4584 
4585 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4586 		int err;
4587 
4588 		set_default_fcs(chan);
4589 
4590 		err = l2cap_ertm_init(chan);
4591 		if (err < 0)
4592 			l2cap_send_disconn_req(chan, -err);
4593 		else
4594 			l2cap_chan_ready(chan);
4595 	}
4596 }
4597 
4598 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4599 				      struct hci_chan *hchan)
4600 {
4601 	chan->hs_hcon = hchan->conn;
4602 	chan->hs_hcon->l2cap_data = chan->conn;
4603 
4604 	BT_DBG("move_state %d", chan->move_state);
4605 
4606 	switch (chan->move_state) {
4607 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4608 		/* Move confirm will be sent after a success
4609 		 * response is received
4610 		 */
4611 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4612 		break;
4613 	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4614 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4615 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4616 		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4617 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4618 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4619 		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4620 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4621 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4622 		}
4623 		break;
4624 	default:
4625 		/* Move was not in expected state, free the channel */
4626 		__release_logical_link(chan);
4627 
4628 		chan->move_state = L2CAP_MOVE_STABLE;
4629 	}
4630 }
4631 
4632 /* Call with chan locked */
4633 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4634 		       u8 status)
4635 {
4636 	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4637 
4638 	if (status) {
4639 		l2cap_logical_fail(chan);
4640 		__release_logical_link(chan);
4641 		return;
4642 	}
4643 
4644 	if (chan->state != BT_CONNECTED) {
4645 		/* Ignore logical link if channel is on BR/EDR */
4646 		if (chan->local_amp_id)
4647 			l2cap_logical_finish_create(chan, hchan);
4648 	} else {
4649 		l2cap_logical_finish_move(chan, hchan);
4650 	}
4651 }
4652 
4653 void l2cap_move_start(struct l2cap_chan *chan)
4654 {
4655 	BT_DBG("chan %p", chan);
4656 
4657 	if (chan->local_amp_id == HCI_BREDR_ID) {
4658 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4659 			return;
4660 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4661 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4662 		/* Placeholder - start physical link setup */
4663 	} else {
4664 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4665 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4666 		chan->move_id = 0;
4667 		l2cap_move_setup(chan);
4668 		l2cap_send_move_chan_req(chan, 0);
4669 	}
4670 }
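/* Illustrative call flow: for a channel currently on BR/EDR the move starts by
 * preparing an AMP physical link (placeholder above); once that completes,
 * __l2cap_physical_cfm() is expected to continue by sending the actual
 * L2CAP_MOVE_CHAN_REQ.  Moving back towards BR/EDR needs no physical link
 * setup, so the request with destination id 0 is sent immediately.
 */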
4671 
4672 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4673 			    u8 local_amp_id, u8 remote_amp_id)
4674 {
4675 	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4676 	       local_amp_id, remote_amp_id);
4677 
4678 	chan->fcs = L2CAP_FCS_NONE;
4679 
4680 	/* Outgoing channel on AMP */
4681 	if (chan->state == BT_CONNECT) {
4682 		if (result == L2CAP_CR_SUCCESS) {
4683 			chan->local_amp_id = local_amp_id;
4684 			l2cap_send_create_chan_req(chan, remote_amp_id);
4685 		} else {
4686 			/* Revert to BR/EDR connect */
4687 			l2cap_send_conn_req(chan);
4688 		}
4689 
4690 		return;
4691 	}
4692 
4693 	/* Incoming channel on AMP */
4694 	if (__l2cap_no_conn_pending(chan)) {
4695 		struct l2cap_conn_rsp rsp;
4696 		char buf[128];
4697 		rsp.scid = cpu_to_le16(chan->dcid);
4698 		rsp.dcid = cpu_to_le16(chan->scid);
4699 
4700 		if (result == L2CAP_CR_SUCCESS) {
4701 			/* Send successful response */
4702 			rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
4703 			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4704 		} else {
4705 			/* Send negative response */
4706 			rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4707 			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4708 		}
4709 
4710 		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4711 			       sizeof(rsp), &rsp);
4712 
4713 		if (result == L2CAP_CR_SUCCESS) {
4714 			__l2cap_state_change(chan, BT_CONFIG);
4715 			set_bit(CONF_REQ_SENT, &chan->conf_state);
4716 			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4717 				       L2CAP_CONF_REQ,
4718 				       l2cap_build_conf_req(chan, buf), buf);
4719 			chan->num_conf_req++;
4720 		}
4721 	}
4722 }
4723 
4724 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4725 				   u8 remote_amp_id)
4726 {
4727 	l2cap_move_setup(chan);
4728 	chan->move_id = local_amp_id;
4729 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
4730 
4731 	l2cap_send_move_chan_req(chan, remote_amp_id);
4732 }
4733 
4734 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4735 {
4736 	struct hci_chan *hchan = NULL;
4737 
4738 	/* Placeholder - get hci_chan for logical link */
4739 
4740 	if (hchan) {
4741 		if (hchan->state == BT_CONNECTED) {
4742 			/* Logical link is ready to go */
4743 			chan->hs_hcon = hchan->conn;
4744 			chan->hs_hcon->l2cap_data = chan->conn;
4745 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4746 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4747 
4748 			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4749 		} else {
4750 			/* Wait for logical link to be ready */
4751 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4752 		}
4753 	} else {
4754 		/* Logical link not available */
4755 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4756 	}
4757 }
4758 
4759 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4760 {
4761 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4762 		u8 rsp_result;
4763 		if (result == -EINVAL)
4764 			rsp_result = L2CAP_MR_BAD_ID;
4765 		else
4766 			rsp_result = L2CAP_MR_NOT_ALLOWED;
4767 
4768 		l2cap_send_move_chan_rsp(chan, rsp_result);
4769 	}
4770 
4771 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
4772 	chan->move_state = L2CAP_MOVE_STABLE;
4773 
4774 	/* Restart data transmission */
4775 	l2cap_ertm_send(chan);
4776 }
4777 
4778 /* Invoke with locked chan */
4779 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4780 {
4781 	u8 local_amp_id = chan->local_amp_id;
4782 	u8 remote_amp_id = chan->remote_amp_id;
4783 
4784 	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4785 	       chan, result, local_amp_id, remote_amp_id);
4786 
4787 	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4788 		l2cap_chan_unlock(chan);
4789 		return;
4790 	}
4791 
4792 	if (chan->state != BT_CONNECTED) {
4793 		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4794 	} else if (result != L2CAP_MR_SUCCESS) {
4795 		l2cap_do_move_cancel(chan, result);
4796 	} else {
4797 		switch (chan->move_role) {
4798 		case L2CAP_MOVE_ROLE_INITIATOR:
4799 			l2cap_do_move_initiate(chan, local_amp_id,
4800 					       remote_amp_id);
4801 			break;
4802 		case L2CAP_MOVE_ROLE_RESPONDER:
4803 			l2cap_do_move_respond(chan, result);
4804 			break;
4805 		default:
4806 			l2cap_do_move_cancel(chan, result);
4807 			break;
4808 		}
4809 	}
4810 }
4811 
4812 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4813 					 struct l2cap_cmd_hdr *cmd,
4814 					 u16 cmd_len, void *data)
4815 {
4816 	struct l2cap_move_chan_req *req = data;
4817 	struct l2cap_move_chan_rsp rsp;
4818 	struct l2cap_chan *chan;
4819 	u16 icid = 0;
4820 	u16 result = L2CAP_MR_NOT_ALLOWED;
4821 
4822 	if (cmd_len != sizeof(*req))
4823 		return -EPROTO;
4824 
4825 	icid = le16_to_cpu(req->icid);
4826 
4827 	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4828 
4829 	if (!enable_hs)
4830 		return -EINVAL;
4831 
4832 	chan = l2cap_get_chan_by_dcid(conn, icid);
4833 	if (!chan) {
4834 		rsp.icid = cpu_to_le16(icid);
4835 		rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4836 		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4837 			       sizeof(rsp), &rsp);
4838 		return 0;
4839 	}
4840 
4841 	chan->ident = cmd->ident;
4842 
4843 	if (chan->scid < L2CAP_CID_DYN_START ||
4844 	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4845 	    (chan->mode != L2CAP_MODE_ERTM &&
4846 	     chan->mode != L2CAP_MODE_STREAMING)) {
4847 		result = L2CAP_MR_NOT_ALLOWED;
4848 		goto send_move_response;
4849 	}
4850 
4851 	if (chan->local_amp_id == req->dest_amp_id) {
4852 		result = L2CAP_MR_SAME_ID;
4853 		goto send_move_response;
4854 	}
4855 
4856 	if (req->dest_amp_id) {
4857 		struct hci_dev *hdev;
4858 		hdev = hci_dev_get(req->dest_amp_id);
4859 		if (!hdev || hdev->dev_type != HCI_AMP ||
4860 		    !test_bit(HCI_UP, &hdev->flags)) {
4861 			if (hdev)
4862 				hci_dev_put(hdev);
4863 
4864 			result = L2CAP_MR_BAD_ID;
4865 			goto send_move_response;
4866 		}
4867 		hci_dev_put(hdev);
4868 	}
4869 
4870 	/* Detect a move collision.  Only send a collision response
4871 	 * if this side has "lost", otherwise proceed with the move.
4872 	 * The winner has the larger bd_addr.
4873 	 */
4874 	if ((__chan_is_moving(chan) ||
4875 	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4876 	    bacmp(conn->src, conn->dst) > 0) {
4877 		result = L2CAP_MR_COLLISION;
4878 		goto send_move_response;
4879 	}
4880 
4881 	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4882 	l2cap_move_setup(chan);
4883 	chan->move_id = req->dest_amp_id;
4884 	icid = chan->dcid;
4885 
4886 	if (!req->dest_amp_id) {
4887 		/* Moving to BR/EDR */
4888 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4889 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4890 			result = L2CAP_MR_PEND;
4891 		} else {
4892 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4893 			result = L2CAP_MR_SUCCESS;
4894 		}
4895 	} else {
4896 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4897 		/* Placeholder - uncomment when amp functions are available */
4898 		/*amp_accept_physical(chan, req->dest_amp_id);*/
4899 		result = L2CAP_MR_PEND;
4900 	}
4901 
4902 send_move_response:
4903 	l2cap_send_move_chan_rsp(chan, result);
4904 
4905 	l2cap_chan_unlock(chan);
4906 
4907 	return 0;
4908 }
4909 
4910 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4911 {
4912 	struct l2cap_chan *chan;
4913 	struct hci_chan *hchan = NULL;
4914 
4915 	chan = l2cap_get_chan_by_scid(conn, icid);
4916 	if (!chan) {
4917 		l2cap_send_move_chan_cfm_icid(conn, icid);
4918 		return;
4919 	}
4920 
4921 	__clear_chan_timer(chan);
4922 	if (result == L2CAP_MR_PEND)
4923 		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4924 
4925 	switch (chan->move_state) {
4926 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4927 		/* Move confirm will be sent when logical link
4928 		 * is complete.
4929 		 */
4930 		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4931 		break;
4932 	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4933 		if (result == L2CAP_MR_PEND) {
4934 			break;
4935 		} else if (test_bit(CONN_LOCAL_BUSY,
4936 				    &chan->conn_state)) {
4937 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4938 		} else {
4939 			/* Logical link is up or moving to BR/EDR,
4940 			 * proceed with move
4941 			 */
4942 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4943 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4944 		}
4945 		break;
4946 	case L2CAP_MOVE_WAIT_RSP:
4947 		/* Moving to AMP */
4948 		if (result == L2CAP_MR_SUCCESS) {
4949 			/* Remote is ready, send confirm immediately
4950 			 * after logical link is ready
4951 			 */
4952 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4953 		} else {
4954 			/* Both logical link and move success
4955 			 * are required to confirm
4956 			 */
4957 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
4958 		}
4959 
4960 		/* Placeholder - get hci_chan for logical link */
4961 		if (!hchan) {
4962 			/* Logical link not available */
4963 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4964 			break;
4965 		}
4966 
4967 		/* If the logical link is not yet connected, do not
4968 		 * send confirmation.
4969 		 */
4970 		if (hchan->state != BT_CONNECTED)
4971 			break;
4972 
4973 		/* Logical link is already ready to go */
4974 
4975 		chan->hs_hcon = hchan->conn;
4976 		chan->hs_hcon->l2cap_data = chan->conn;
4977 
4978 		if (result == L2CAP_MR_SUCCESS) {
4979 			/* Can confirm now */
4980 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4981 		} else {
4982 			/* Now only need move success
4983 			 * to confirm
4984 			 */
4985 			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4986 		}
4987 
4988 		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4989 		break;
4990 	default:
4991 		/* Any other amp move state means the move failed. */
4992 		chan->move_id = chan->local_amp_id;
4993 		l2cap_move_done(chan);
4994 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4995 	}
4996 
4997 	l2cap_chan_unlock(chan);
4998 }
4999 
5000 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5001 			    u16 result)
5002 {
5003 	struct l2cap_chan *chan;
5004 
5005 	chan = l2cap_get_chan_by_ident(conn, ident);
5006 	if (!chan) {
5007 		/* Could not locate channel, icid is best guess */
5008 		l2cap_send_move_chan_cfm_icid(conn, icid);
5009 		return;
5010 	}
5011 
5012 	__clear_chan_timer(chan);
5013 
5014 	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5015 		if (result == L2CAP_MR_COLLISION) {
5016 			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5017 		} else {
5018 			/* Cleanup - cancel move */
5019 			chan->move_id = chan->local_amp_id;
5020 			l2cap_move_done(chan);
5021 		}
5022 	}
5023 
5024 	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5025 
5026 	l2cap_chan_unlock(chan);
5027 }
5028 
5029 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5030 				  struct l2cap_cmd_hdr *cmd,
5031 				  u16 cmd_len, void *data)
5032 {
5033 	struct l2cap_move_chan_rsp *rsp = data;
5034 	u16 icid, result;
5035 
5036 	if (cmd_len != sizeof(*rsp))
5037 		return -EPROTO;
5038 
5039 	icid = le16_to_cpu(rsp->icid);
5040 	result = le16_to_cpu(rsp->result);
5041 
5042 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5043 
5044 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5045 		l2cap_move_continue(conn, icid, result);
5046 	else
5047 		l2cap_move_fail(conn, cmd->ident, icid, result);
5048 
5049 	return 0;
5050 }
5051 
5052 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5053 				      struct l2cap_cmd_hdr *cmd,
5054 				      u16 cmd_len, void *data)
5055 {
5056 	struct l2cap_move_chan_cfm *cfm = data;
5057 	struct l2cap_chan *chan;
5058 	u16 icid, result;
5059 
5060 	if (cmd_len != sizeof(*cfm))
5061 		return -EPROTO;
5062 
5063 	icid = le16_to_cpu(cfm->icid);
5064 	result = le16_to_cpu(cfm->result);
5065 
5066 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5067 
5068 	chan = l2cap_get_chan_by_dcid(conn, icid);
5069 	if (!chan) {
5070 		/* Spec requires a response even if the icid was not found */
5071 		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5072 		return 0;
5073 	}
5074 
5075 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5076 		if (result == L2CAP_MC_CONFIRMED) {
5077 			chan->local_amp_id = chan->move_id;
5078 			if (!chan->local_amp_id)
5079 				__release_logical_link(chan);
5080 		} else {
5081 			chan->move_id = chan->local_amp_id;
5082 		}
5083 
5084 		l2cap_move_done(chan);
5085 	}
5086 
5087 	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5088 
5089 	l2cap_chan_unlock(chan);
5090 
5091 	return 0;
5092 }
5093 
5094 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5095 						 struct l2cap_cmd_hdr *cmd,
5096 						 u16 cmd_len, void *data)
5097 {
5098 	struct l2cap_move_chan_cfm_rsp *rsp = data;
5099 	struct l2cap_chan *chan;
5100 	u16 icid;
5101 
5102 	if (cmd_len != sizeof(*rsp))
5103 		return -EPROTO;
5104 
5105 	icid = le16_to_cpu(rsp->icid);
5106 
5107 	BT_DBG("icid 0x%4.4x", icid);
5108 
5109 	chan = l2cap_get_chan_by_scid(conn, icid);
5110 	if (!chan)
5111 		return 0;
5112 
5113 	__clear_chan_timer(chan);
5114 
5115 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5116 		chan->local_amp_id = chan->move_id;
5117 
5118 		if (!chan->local_amp_id && chan->hs_hchan)
5119 			__release_logical_link(chan);
5120 
5121 		l2cap_move_done(chan);
5122 	}
5123 
5124 	l2cap_chan_unlock(chan);
5125 
5126 	return 0;
5127 }
5128 
5129 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
5130 					 u16 to_multiplier)
5131 {
5132 	u16 max_latency;
5133 
5134 	if (min > max || min < 6 || max > 3200)
5135 		return -EINVAL;
5136 
5137 	if (to_multiplier < 10 || to_multiplier > 3200)
5138 		return -EINVAL;
5139 
5140 	if (max >= to_multiplier * 8)
5141 		return -EINVAL;
5142 
5143 	max_latency = (to_multiplier * 8 / max) - 1;
5144 	if (latency > 499 || latency > max_latency)
5145 		return -EINVAL;
5146 
5147 	return 0;
5148 }
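/* Worked example for the checks above (connection interval in 1.25 ms units,
 * supervision timeout in 10 ms units): min = 24, max = 40, latency = 0 and
 * to_multiplier = 42 describe a 30-50 ms interval with a 420 ms timeout.
 * Since 40 < 42 * 8 the timeout check passes, max_latency = (42 * 8 / 40) - 1
 * = 7, and a latency of 0 is accepted.  By contrast, max = 200 (250 ms) with
 * to_multiplier = 10 (100 ms) is rejected by "max >= to_multiplier * 8",
 * because the supervision timeout would be shorter than a single connection
 * interval.
 */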
5149 
5150 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5151 					      struct l2cap_cmd_hdr *cmd,
5152 					      u8 *data)
5153 {
5154 	struct hci_conn *hcon = conn->hcon;
5155 	struct l2cap_conn_param_update_req *req;
5156 	struct l2cap_conn_param_update_rsp rsp;
5157 	u16 min, max, latency, to_multiplier, cmd_len;
5158 	int err;
5159 
5160 	if (!(hcon->link_mode & HCI_LM_MASTER))
5161 		return -EINVAL;
5162 
5163 	cmd_len = __le16_to_cpu(cmd->len);
5164 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5165 		return -EPROTO;
5166 
5167 	req = (struct l2cap_conn_param_update_req *) data;
5168 	min		= __le16_to_cpu(req->min);
5169 	max		= __le16_to_cpu(req->max);
5170 	latency		= __le16_to_cpu(req->latency);
5171 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
5172 
5173 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5174 	       min, max, latency, to_multiplier);
5175 
5176 	memset(&rsp, 0, sizeof(rsp));
5177 
5178 	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5179 	if (err)
5180 		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5181 	else
5182 		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5183 
5184 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5185 		       sizeof(rsp), &rsp);
5186 
5187 	if (!err)
5188 		hci_le_conn_update(hcon, min, max, latency, to_multiplier);
5189 
5190 	return 0;
5191 }
5192 
5193 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5194 				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5195 				      u8 *data)
5196 {
5197 	int err = 0;
5198 
5199 	switch (cmd->code) {
5200 	case L2CAP_COMMAND_REJ:
5201 		l2cap_command_rej(conn, cmd, cmd_len, data);
5202 		break;
5203 
5204 	case L2CAP_CONN_REQ:
5205 		err = l2cap_connect_req(conn, cmd, cmd_len, data);
5206 		break;
5207 
5208 	case L2CAP_CONN_RSP:
5209 	case L2CAP_CREATE_CHAN_RSP:
5210 		err = l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5211 		break;
5212 
5213 	case L2CAP_CONF_REQ:
5214 		err = l2cap_config_req(conn, cmd, cmd_len, data);
5215 		break;
5216 
5217 	case L2CAP_CONF_RSP:
5218 		err = l2cap_config_rsp(conn, cmd, cmd_len, data);
5219 		break;
5220 
5221 	case L2CAP_DISCONN_REQ:
5222 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5223 		break;
5224 
5225 	case L2CAP_DISCONN_RSP:
5226 		err = l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5227 		break;
5228 
5229 	case L2CAP_ECHO_REQ:
5230 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5231 		break;
5232 
5233 	case L2CAP_ECHO_RSP:
5234 		break;
5235 
5236 	case L2CAP_INFO_REQ:
5237 		err = l2cap_information_req(conn, cmd, cmd_len, data);
5238 		break;
5239 
5240 	case L2CAP_INFO_RSP:
5241 		err = l2cap_information_rsp(conn, cmd, cmd_len, data);
5242 		break;
5243 
5244 	case L2CAP_CREATE_CHAN_REQ:
5245 		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5246 		break;
5247 
5248 	case L2CAP_MOVE_CHAN_REQ:
5249 		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5250 		break;
5251 
5252 	case L2CAP_MOVE_CHAN_RSP:
5253 		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5254 		break;
5255 
5256 	case L2CAP_MOVE_CHAN_CFM:
5257 		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5258 		break;
5259 
5260 	case L2CAP_MOVE_CHAN_CFM_RSP:
5261 		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5262 		break;
5263 
5264 	default:
5265 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5266 		err = -EINVAL;
5267 		break;
5268 	}
5269 
5270 	return err;
5271 }
5272 
5273 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5274 				   struct l2cap_cmd_hdr *cmd, u8 *data)
5275 {
5276 	switch (cmd->code) {
5277 	case L2CAP_COMMAND_REJ:
5278 		return 0;
5279 
5280 	case L2CAP_CONN_PARAM_UPDATE_REQ:
5281 		return l2cap_conn_param_update_req(conn, cmd, data);
5282 
5283 	case L2CAP_CONN_PARAM_UPDATE_RSP:
5284 		return 0;
5285 
5286 	default:
5287 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5288 		return -EINVAL;
5289 	}
5290 }
5291 
5292 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5293 				     struct sk_buff *skb)
5294 {
5295 	u8 *data = skb->data;
5296 	int len = skb->len;
5297 	struct l2cap_cmd_hdr cmd;
5298 	int err;
5299 
5300 	l2cap_raw_recv(conn, skb);
5301 
5302 	while (len >= L2CAP_CMD_HDR_SIZE) {
5303 		u16 cmd_len;
5304 		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5305 		data += L2CAP_CMD_HDR_SIZE;
5306 		len  -= L2CAP_CMD_HDR_SIZE;
5307 
5308 		cmd_len = le16_to_cpu(cmd.len);
5309 
5310 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5311 		       cmd.ident);
5312 
5313 		if (cmd_len > len || !cmd.ident) {
5314 			BT_DBG("corrupted command");
5315 			break;
5316 		}
5317 
5318 		if (conn->hcon->type == LE_LINK)
5319 			err = l2cap_le_sig_cmd(conn, &cmd, data);
5320 		else
5321 			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5322 
5323 		if (err) {
5324 			struct l2cap_cmd_rej_unk rej;
5325 
5326 			BT_ERR("Wrong link type (%d)", err);
5327 
5328 			/* FIXME: Map err to a valid reason */
5329 			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5330 			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5331 				       sizeof(rej), &rej);
5332 		}
5333 
5334 		data += cmd_len;
5335 		len  -= cmd_len;
5336 	}
5337 
5338 	kfree_skb(skb);
5339 }
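/* A single signaling C-frame may carry several commands back to back; the loop
 * above walks them using the common 4-byte command header and dispatches to
 * the LE or BR/EDR handler based on the link type, sending a command reject
 * for anything that fails.  Header layout (struct l2cap_cmd_hdr):
 *
 *   +-----------+------------+------------+-------------------------+
 *   | code (u8) | ident (u8) | len (le16) | len bytes of parameters |
 *   +-----------+------------+------------+-------------------------+
 */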
5340 
5341 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
5342 {
5343 	u16 our_fcs, rcv_fcs;
5344 	int hdr_size;
5345 
5346 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5347 		hdr_size = L2CAP_EXT_HDR_SIZE;
5348 	else
5349 		hdr_size = L2CAP_ENH_HDR_SIZE;
5350 
5351 	if (chan->fcs == L2CAP_FCS_CRC16) {
5352 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5353 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5354 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5355 
5356 		if (our_fcs != rcv_fcs)
5357 			return -EBADMSG;
5358 	}
5359 	return 0;
5360 }
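/* Note: the received FCS occupies the last two bytes of the frame and is
 * trimmed off before the comparison; the CRC itself is computed starting
 * hdr_size bytes before the current data pointer, so it covers the frame
 * header preceding the payload as well as the payload itself.
 */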
5361 
5362 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5363 {
5364 	struct l2cap_ctrl control;
5365 
5366 	BT_DBG("chan %p", chan);
5367 
5368 	memset(&control, 0, sizeof(control));
5369 	control.sframe = 1;
5370 	control.final = 1;
5371 	control.reqseq = chan->buffer_seq;
5372 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
5373 
5374 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5375 		control.super = L2CAP_SUPER_RNR;
5376 		l2cap_send_sframe(chan, &control);
5377 	}
5378 
5379 	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5380 	    chan->unacked_frames > 0)
5381 		__set_retrans_timer(chan);
5382 
5383 	/* Send pending iframes */
5384 	l2cap_ertm_send(chan);
5385 
5386 	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5387 	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5388 		/* F-bit wasn't sent in an s-frame or i-frame yet, so
5389 		 * send it now.
5390 		 */
5391 		control.super = L2CAP_SUPER_RR;
5392 		l2cap_send_sframe(chan, &control);
5393 	}
5394 }
5395 
5396 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5397 			    struct sk_buff **last_frag)
5398 {
5399 	/* skb->len reflects data in skb as well as all fragments
5400 	 * skb->data_len reflects only data in fragments
5401 	 */
5402 	if (!skb_has_frag_list(skb))
5403 		skb_shinfo(skb)->frag_list = new_frag;
5404 
5405 	new_frag->next = NULL;
5406 
5407 	(*last_frag)->next = new_frag;
5408 	*last_frag = new_frag;
5409 
5410 	skb->len += new_frag->len;
5411 	skb->data_len += new_frag->len;
5412 	skb->truesize += new_frag->truesize;
5413 }
5414 
5415 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5416 				struct l2cap_ctrl *control)
5417 {
5418 	int err = -EINVAL;
5419 
5420 	switch (control->sar) {
5421 	case L2CAP_SAR_UNSEGMENTED:
5422 		if (chan->sdu)
5423 			break;
5424 
5425 		err = chan->ops->recv(chan, skb);
5426 		break;
5427 
5428 	case L2CAP_SAR_START:
5429 		if (chan->sdu)
5430 			break;
5431 
5432 		chan->sdu_len = get_unaligned_le16(skb->data);
5433 		skb_pull(skb, L2CAP_SDULEN_SIZE);
5434 
5435 		if (chan->sdu_len > chan->imtu) {
5436 			err = -EMSGSIZE;
5437 			break;
5438 		}
5439 
5440 		if (skb->len >= chan->sdu_len)
5441 			break;
5442 
5443 		chan->sdu = skb;
5444 		chan->sdu_last_frag = skb;
5445 
5446 		skb = NULL;
5447 		err = 0;
5448 		break;
5449 
5450 	case L2CAP_SAR_CONTINUE:
5451 		if (!chan->sdu)
5452 			break;
5453 
5454 		append_skb_frag(chan->sdu, skb,
5455 				&chan->sdu_last_frag);
5456 		skb = NULL;
5457 
5458 		if (chan->sdu->len >= chan->sdu_len)
5459 			break;
5460 
5461 		err = 0;
5462 		break;
5463 
5464 	case L2CAP_SAR_END:
5465 		if (!chan->sdu)
5466 			break;
5467 
5468 		append_skb_frag(chan->sdu, skb,
5469 				&chan->sdu_last_frag);
5470 		skb = NULL;
5471 
5472 		if (chan->sdu->len != chan->sdu_len)
5473 			break;
5474 
5475 		err = chan->ops->recv(chan, chan->sdu);
5476 
5477 		if (!err) {
5478 			/* Reassembly complete */
5479 			chan->sdu = NULL;
5480 			chan->sdu_last_frag = NULL;
5481 			chan->sdu_len = 0;
5482 		}
5483 		break;
5484 	}
5485 
5486 	if (err) {
5487 		kfree_skb(skb);
5488 		kfree_skb(chan->sdu);
5489 		chan->sdu = NULL;
5490 		chan->sdu_last_frag = NULL;
5491 		chan->sdu_len = 0;
5492 	}
5493 
5494 	return err;
5495 }
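/* Reassembly summary (mirrors the switch above): an unsegmented PDU is handed
 * to chan->ops->recv() directly; a start PDU carries a 16-bit SDU length and
 * opens chan->sdu; continue and end PDUs are appended as fragments via
 * append_skb_frag(); only when the accumulated length matches the advertised
 * SDU length is the reassembled buffer delivered.  Oversized SDUs or length
 * mismatches tear down the partial reassembly and free the offending skb.
 */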
5496 
5497 static int l2cap_resegment(struct l2cap_chan *chan)
5498 {
5499 	/* Placeholder */
5500 	return 0;
5501 }
5502 
5503 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5504 {
5505 	u8 event;
5506 
5507 	if (chan->mode != L2CAP_MODE_ERTM)
5508 		return;
5509 
5510 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5511 	l2cap_tx(chan, NULL, NULL, event);
5512 }
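/* Typical usage (illustrative): the owner of the receive path, e.g. the socket
 * front end, calls l2cap_chan_busy(chan, 1) when it can no longer accept
 * incoming SDUs and l2cap_chan_busy(chan, 0) once buffer space is available
 * again.  In ERTM these translate into local-busy events for the transmit
 * state machine, which in turn reports the condition to the peer with RNR/RR
 * supervisory frames.
 */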
5513 
5514 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5515 {
5516 	int err = 0;
5517 	/* Pass sequential frames to l2cap_reassemble_sdu()
5518 	 * until a gap is encountered.
5519 	 */
5520 
5521 	BT_DBG("chan %p", chan);
5522 
5523 	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5524 		struct sk_buff *skb;
5525 		BT_DBG("Searching for skb with txseq %d (queue len %d)",
5526 		       chan->buffer_seq, skb_queue_len(&chan->srej_q));
5527 
5528 		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5529 
5530 		if (!skb)
5531 			break;
5532 
5533 		skb_unlink(skb, &chan->srej_q);
5534 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5535 		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
5536 		if (err)
5537 			break;
5538 	}
5539 
5540 	if (skb_queue_empty(&chan->srej_q)) {
5541 		chan->rx_state = L2CAP_RX_STATE_RECV;
5542 		l2cap_send_ack(chan);
5543 	}
5544 
5545 	return err;
5546 }
5547 
5548 static void l2cap_handle_srej(struct l2cap_chan *chan,
5549 			      struct l2cap_ctrl *control)
5550 {
5551 	struct sk_buff *skb;
5552 
5553 	BT_DBG("chan %p, control %p", chan, control);
5554 
5555 	if (control->reqseq == chan->next_tx_seq) {
5556 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5557 		l2cap_send_disconn_req(chan, ECONNRESET);
5558 		return;
5559 	}
5560 
5561 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5562 
5563 	if (skb == NULL) {
5564 		BT_DBG("Seq %d not available for retransmission",
5565 		       control->reqseq);
5566 		return;
5567 	}
5568 
5569 	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5570 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5571 		l2cap_send_disconn_req(chan, ECONNRESET);
5572 		return;
5573 	}
5574 
5575 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5576 
5577 	if (control->poll) {
5578 		l2cap_pass_to_tx(chan, control);
5579 
5580 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
5581 		l2cap_retransmit(chan, control);
5582 		l2cap_ertm_send(chan);
5583 
5584 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5585 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
5586 			chan->srej_save_reqseq = control->reqseq;
5587 		}
5588 	} else {
5589 		l2cap_pass_to_tx_fbit(chan, control);
5590 
5591 		if (control->final) {
5592 			if (chan->srej_save_reqseq != control->reqseq ||
5593 			    !test_and_clear_bit(CONN_SREJ_ACT,
5594 						&chan->conn_state))
5595 				l2cap_retransmit(chan, control);
5596 		} else {
5597 			l2cap_retransmit(chan, control);
5598 			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5599 				set_bit(CONN_SREJ_ACT, &chan->conn_state);
5600 				chan->srej_save_reqseq = control->reqseq;
5601 			}
5602 		}
5603 	}
5604 }
5605 
5606 static void l2cap_handle_rej(struct l2cap_chan *chan,
5607 			     struct l2cap_ctrl *control)
5608 {
5609 	struct sk_buff *skb;
5610 
5611 	BT_DBG("chan %p, control %p", chan, control);
5612 
5613 	if (control->reqseq == chan->next_tx_seq) {
5614 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5615 		l2cap_send_disconn_req(chan, ECONNRESET);
5616 		return;
5617 	}
5618 
5619 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5620 
5621 	if (chan->max_tx && skb &&
5622 	    bt_cb(skb)->control.retries >= chan->max_tx) {
5623 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5624 		l2cap_send_disconn_req(chan, ECONNRESET);
5625 		return;
5626 	}
5627 
5628 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5629 
5630 	l2cap_pass_to_tx(chan, control);
5631 
5632 	if (control->final) {
5633 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5634 			l2cap_retransmit_all(chan, control);
5635 	} else {
5636 		l2cap_retransmit_all(chan, control);
5637 		l2cap_ertm_send(chan);
5638 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5639 			set_bit(CONN_REJ_ACT, &chan->conn_state);
5640 	}
5641 }
5642 
5643 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5644 {
5645 	BT_DBG("chan %p, txseq %d", chan, txseq);
5646 
5647 	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5648 	       chan->expected_tx_seq);
5649 
5650 	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5651 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5652 		    chan->tx_win) {
5653 			/* See notes below regarding "double poll" and
5654 			 * invalid packets.
5655 			 */
5656 			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5657 				BT_DBG("Invalid/Ignore - after SREJ");
5658 				return L2CAP_TXSEQ_INVALID_IGNORE;
5659 			} else {
5660 				BT_DBG("Invalid - in window after SREJ sent");
5661 				return L2CAP_TXSEQ_INVALID;
5662 			}
5663 		}
5664 
5665 		if (chan->srej_list.head == txseq) {
5666 			BT_DBG("Expected SREJ");
5667 			return L2CAP_TXSEQ_EXPECTED_SREJ;
5668 		}
5669 
5670 		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
5671 			BT_DBG("Duplicate SREJ - txseq already stored");
5672 			return L2CAP_TXSEQ_DUPLICATE_SREJ;
5673 		}
5674 
5675 		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
5676 			BT_DBG("Unexpected SREJ - not requested");
5677 			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
5678 		}
5679 	}
5680 
5681 	if (chan->expected_tx_seq == txseq) {
5682 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5683 		    chan->tx_win) {
5684 			BT_DBG("Invalid - txseq outside tx window");
5685 			return L2CAP_TXSEQ_INVALID;
5686 		} else {
5687 			BT_DBG("Expected");
5688 			return L2CAP_TXSEQ_EXPECTED;
5689 		}
5690 	}
5691 
5692 	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
5693 	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
5694 		BT_DBG("Duplicate - expected_tx_seq later than txseq");
5695 		return L2CAP_TXSEQ_DUPLICATE;
5696 	}
5697 
5698 	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
5699 		/* A source of invalid packets is a "double poll" condition,
5700 		 * where delays cause us to send multiple poll packets.  If
5701 		 * the remote stack receives and processes both polls,
5702 		 * sequence numbers can wrap around in such a way that a
5703 		 * resent frame has a sequence number that looks like new data
5704 		 * with a sequence gap.  This would trigger an erroneous SREJ
5705 		 * request.
5706 		 *
5707 		 * Fortunately, this is impossible with a tx window that is
5708 		 * at most half of the sequence space, which allows such
5709 		 * invalid frames to be safely ignored.
5710 		 *
5711 		 * With a tx window larger than half of the sequence space,
5712 		 * the frame is invalid and cannot safely be ignored, so the
5713 		 * channel is disconnected instead.
5714 		 */
5715 
5716 		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5717 			BT_DBG("Invalid/Ignore - txseq outside tx window");
5718 			return L2CAP_TXSEQ_INVALID_IGNORE;
5719 		} else {
5720 			BT_DBG("Invalid - txseq outside tx window");
5721 			return L2CAP_TXSEQ_INVALID;
5722 		}
5723 	} else {
5724 		BT_DBG("Unexpected - txseq indicates missing frames");
5725 		return L2CAP_TXSEQ_UNEXPECTED;
5726 	}
5727 }
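/* Worked example for the classification above (illustrative; assumes
 * __seq_offset() is the forward distance modulo the sequence space): with
 * 6-bit sequence numbers (tx_win_max = 63), tx_win = 10, last_acked_seq = 60
 * and expected_tx_seq = 62, a txseq of 61 has offset 1 and is classified as a
 * duplicate; txseq 1 has offset 5 and is "unexpected" (frames 62, 63 and 0
 * are missing); txseq 8 has offset 12, which is outside the tx window, and
 * because tx_win is no more than half the sequence space it is safely ignored
 * rather than treated as a protocol error.
 */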
5728 
5729 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
5730 			       struct l2cap_ctrl *control,
5731 			       struct sk_buff *skb, u8 event)
5732 {
5733 	int err = 0;
5734 	bool skb_in_use = false;
5735 
5736 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5737 	       event);
5738 
5739 	switch (event) {
5740 	case L2CAP_EV_RECV_IFRAME:
5741 		switch (l2cap_classify_txseq(chan, control->txseq)) {
5742 		case L2CAP_TXSEQ_EXPECTED:
5743 			l2cap_pass_to_tx(chan, control);
5744 
5745 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5746 				BT_DBG("Busy, discarding expected seq %d",
5747 				       control->txseq);
5748 				break;
5749 			}
5750 
5751 			chan->expected_tx_seq = __next_seq(chan,
5752 							   control->txseq);
5753 
5754 			chan->buffer_seq = chan->expected_tx_seq;
5755 			skb_in_use = true;
5756 
5757 			err = l2cap_reassemble_sdu(chan, skb, control);
5758 			if (err)
5759 				break;
5760 
5761 			if (control->final) {
5762 				if (!test_and_clear_bit(CONN_REJ_ACT,
5763 							&chan->conn_state)) {
5764 					control->final = 0;
5765 					l2cap_retransmit_all(chan, control);
5766 					l2cap_ertm_send(chan);
5767 				}
5768 			}
5769 
5770 			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
5771 				l2cap_send_ack(chan);
5772 			break;
5773 		case L2CAP_TXSEQ_UNEXPECTED:
5774 			l2cap_pass_to_tx(chan, control);
5775 
5776 			/* Can't issue SREJ frames in the local busy state.
5777 			 * Drop this frame, it will be seen as missing
5778 			 * when local busy is exited.
5779 			 */
5780 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5781 				BT_DBG("Busy, discarding unexpected seq %d",
5782 				       control->txseq);
5783 				break;
5784 			}
5785 
5786 			/* There was a gap in the sequence, so an SREJ
5787 			 * must be sent for each missing frame.  The
5788 			 * current frame is stored for later use.
5789 			 */
5790 			skb_queue_tail(&chan->srej_q, skb);
5791 			skb_in_use = true;
5792 			BT_DBG("Queued %p (queue len %d)", skb,
5793 			       skb_queue_len(&chan->srej_q));
5794 
5795 			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
5796 			l2cap_seq_list_clear(&chan->srej_list);
5797 			l2cap_send_srej(chan, control->txseq);
5798 
5799 			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
5800 			break;
5801 		case L2CAP_TXSEQ_DUPLICATE:
5802 			l2cap_pass_to_tx(chan, control);
5803 			break;
5804 		case L2CAP_TXSEQ_INVALID_IGNORE:
5805 			break;
5806 		case L2CAP_TXSEQ_INVALID:
5807 		default:
5808 			l2cap_send_disconn_req(chan, ECONNRESET);
5809 			break;
5810 		}
5811 		break;
5812 	case L2CAP_EV_RECV_RR:
5813 		l2cap_pass_to_tx(chan, control);
5814 		if (control->final) {
5815 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5816 
5817 			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
5818 			    !__chan_is_moving(chan)) {
5819 				control->final = 0;
5820 				l2cap_retransmit_all(chan, control);
5821 			}
5822 
5823 			l2cap_ertm_send(chan);
5824 		} else if (control->poll) {
5825 			l2cap_send_i_or_rr_or_rnr(chan);
5826 		} else {
5827 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
5828 					       &chan->conn_state) &&
5829 			    chan->unacked_frames)
5830 				__set_retrans_timer(chan);
5831 
5832 			l2cap_ertm_send(chan);
5833 		}
5834 		break;
5835 	case L2CAP_EV_RECV_RNR:
5836 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5837 		l2cap_pass_to_tx(chan, control);
5838 		if (control && control->poll) {
5839 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
5840 			l2cap_send_rr_or_rnr(chan, 0);
5841 		}
5842 		__clear_retrans_timer(chan);
5843 		l2cap_seq_list_clear(&chan->retrans_list);
5844 		break;
5845 	case L2CAP_EV_RECV_REJ:
5846 		l2cap_handle_rej(chan, control);
5847 		break;
5848 	case L2CAP_EV_RECV_SREJ:
5849 		l2cap_handle_srej(chan, control);
5850 		break;
5851 	default:
5852 		break;
5853 	}
5854 
5855 	if (skb && !skb_in_use) {
5856 		BT_DBG("Freeing %p", skb);
5857 		kfree_skb(skb);
5858 	}
5859 
5860 	return err;
5861 }
5862 
5863 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
5864 				    struct l2cap_ctrl *control,
5865 				    struct sk_buff *skb, u8 event)
5866 {
5867 	int err = 0;
5868 	u16 txseq = control->txseq;
5869 	bool skb_in_use = false;
5870 
5871 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5872 	       event);
5873 
5874 	switch (event) {
5875 	case L2CAP_EV_RECV_IFRAME:
5876 		switch (l2cap_classify_txseq(chan, txseq)) {
5877 		case L2CAP_TXSEQ_EXPECTED:
5878 			/* Keep frame for reassembly later */
5879 			l2cap_pass_to_tx(chan, control);
5880 			skb_queue_tail(&chan->srej_q, skb);
5881 			skb_in_use = true;
5882 			BT_DBG("Queued %p (queue len %d)", skb,
5883 			       skb_queue_len(&chan->srej_q));
5884 
5885 			chan->expected_tx_seq = __next_seq(chan, txseq);
5886 			break;
5887 		case L2CAP_TXSEQ_EXPECTED_SREJ:
5888 			l2cap_seq_list_pop(&chan->srej_list);
5889 
5890 			l2cap_pass_to_tx(chan, control);
5891 			skb_queue_tail(&chan->srej_q, skb);
5892 			skb_in_use = true;
5893 			BT_DBG("Queued %p (queue len %d)", skb,
5894 			       skb_queue_len(&chan->srej_q));
5895 
5896 			err = l2cap_rx_queued_iframes(chan);
5897 			if (err)
5898 				break;
5899 
5900 			break;
5901 		case L2CAP_TXSEQ_UNEXPECTED:
5902 			/* Got a frame that can't be reassembled yet.
5903 			 * Save it for later, and send SREJs to cover
5904 			 * the missing frames.
5905 			 */
5906 			skb_queue_tail(&chan->srej_q, skb);
5907 			skb_in_use = true;
5908 			BT_DBG("Queued %p (queue len %d)", skb,
5909 			       skb_queue_len(&chan->srej_q));
5910 
5911 			l2cap_pass_to_tx(chan, control);
5912 			l2cap_send_srej(chan, control->txseq);
5913 			break;
5914 		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
5915 			/* This frame was requested with an SREJ, but
5916 			 * some expected retransmitted frames are
5917 			 * missing.  Request retransmission of missing
5918 			 * SREJ'd frames.
5919 			 */
5920 			skb_queue_tail(&chan->srej_q, skb);
5921 			skb_in_use = true;
5922 			BT_DBG("Queued %p (queue len %d)", skb,
5923 			       skb_queue_len(&chan->srej_q));
5924 
5925 			l2cap_pass_to_tx(chan, control);
5926 			l2cap_send_srej_list(chan, control->txseq);
5927 			break;
5928 		case L2CAP_TXSEQ_DUPLICATE_SREJ:
5929 			/* We've already queued this frame.  Drop this copy. */
5930 			l2cap_pass_to_tx(chan, control);
5931 			break;
5932 		case L2CAP_TXSEQ_DUPLICATE:
5933 			/* Expecting a later sequence number, so this frame
5934 			 * was already received.  Ignore it completely.
5935 			 */
5936 			break;
5937 		case L2CAP_TXSEQ_INVALID_IGNORE:
5938 			break;
5939 		case L2CAP_TXSEQ_INVALID:
5940 		default:
5941 			l2cap_send_disconn_req(chan, ECONNRESET);
5942 			break;
5943 		}
5944 		break;
5945 	case L2CAP_EV_RECV_RR:
5946 		l2cap_pass_to_tx(chan, control);
5947 		if (control->final) {
5948 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5949 
5950 			if (!test_and_clear_bit(CONN_REJ_ACT,
5951 						&chan->conn_state)) {
5952 				control->final = 0;
5953 				l2cap_retransmit_all(chan, control);
5954 			}
5955 
5956 			l2cap_ertm_send(chan);
5957 		} else if (control->poll) {
5958 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
5959 					       &chan->conn_state) &&
5960 			    chan->unacked_frames) {
5961 				__set_retrans_timer(chan);
5962 			}
5963 
5964 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
5965 			l2cap_send_srej_tail(chan);
5966 		} else {
5967 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
5968 					       &chan->conn_state) &&
5969 			    chan->unacked_frames)
5970 				__set_retrans_timer(chan);
5971 
5972 			l2cap_send_ack(chan);
5973 		}
5974 		break;
5975 	case L2CAP_EV_RECV_RNR:
5976 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5977 		l2cap_pass_to_tx(chan, control);
5978 		if (control->poll) {
5979 			l2cap_send_srej_tail(chan);
5980 		} else {
5981 			struct l2cap_ctrl rr_control;
5982 			memset(&rr_control, 0, sizeof(rr_control));
5983 			rr_control.sframe = 1;
5984 			rr_control.super = L2CAP_SUPER_RR;
5985 			rr_control.reqseq = chan->buffer_seq;
5986 			l2cap_send_sframe(chan, &rr_control);
5987 		}
5988 
5989 		break;
5990 	case L2CAP_EV_RECV_REJ:
5991 		l2cap_handle_rej(chan, control);
5992 		break;
5993 	case L2CAP_EV_RECV_SREJ:
5994 		l2cap_handle_srej(chan, control);
5995 		break;
5996 	}
5997 
5998 	if (skb && !skb_in_use) {
5999 		BT_DBG("Freeing %p", skb);
6000 		kfree_skb(skb);
6001 	}
6002 
6003 	return err;
6004 }
6005 
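/* Called when a channel move has completed: return the RX state machine
 * to RECV, adopt the MTU of the link now carrying the channel (AMP block
 * MTU or BR/EDR ACL MTU) and resegment any pending outgoing data.
 */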
6006 static int l2cap_finish_move(struct l2cap_chan *chan)
6007 {
6008 	BT_DBG("chan %p", chan);
6009 
6010 	chan->rx_state = L2CAP_RX_STATE_RECV;
6011 
6012 	if (chan->hs_hcon)
6013 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6014 	else
6015 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6016 
6017 	return l2cap_resegment(chan);
6018 }
6019 
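/* RX handler for the WAIT_P state: only S-frames with the poll bit set
 * are accepted.  The TX side is rewound to the peer's reqseq, the move
 * is finished, a reply carrying the final bit is sent and the event is
 * then handled as in the normal RECV state.
 */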
6020 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6021 				 struct l2cap_ctrl *control,
6022 				 struct sk_buff *skb, u8 event)
6023 {
6024 	int err;
6025 
6026 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6027 	       event);
6028 
6029 	if (!control->poll)
6030 		return -EPROTO;
6031 
6032 	l2cap_process_reqseq(chan, control->reqseq);
6033 
6034 	if (!skb_queue_empty(&chan->tx_q))
6035 		chan->tx_send_head = skb_peek(&chan->tx_q);
6036 	else
6037 		chan->tx_send_head = NULL;
6038 
6039 	/* Rewind next_tx_seq to the point expected
6040 	 * by the receiver.
6041 	 */
6042 	chan->next_tx_seq = control->reqseq;
6043 	chan->unacked_frames = 0;
6044 
6045 	err = l2cap_finish_move(chan);
6046 	if (err)
6047 		return err;
6048 
6049 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
6050 	l2cap_send_i_or_rr_or_rnr(chan);
6051 
6052 	if (event == L2CAP_EV_RECV_IFRAME)
6053 		return -EPROTO;
6054 
6055 	return l2cap_rx_state_recv(chan, control, NULL, event);
6056 }
6057 
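/* RX handler for the WAIT_F state: only frames with the final bit set are
 * accepted.  The remote-busy condition is cleared, the TX side is rewound
 * to the peer's reqseq, the MTU of the new link is adopted and, after
 * resegmentation, the frame is processed as in the normal RECV state.
 */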
6058 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6059 				 struct l2cap_ctrl *control,
6060 				 struct sk_buff *skb, u8 event)
6061 {
6062 	int err;
6063 
6064 	if (!control->final)
6065 		return -EPROTO;
6066 
6067 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6068 
6069 	chan->rx_state = L2CAP_RX_STATE_RECV;
6070 	l2cap_process_reqseq(chan, control->reqseq);
6071 
6072 	if (!skb_queue_empty(&chan->tx_q))
6073 		chan->tx_send_head = skb_peek(&chan->tx_q);
6074 	else
6075 		chan->tx_send_head = NULL;
6076 
6077 	/* Rewind next_tx_seq to the point expected
6078 	 * by the receiver.
6079 	 */
6080 	chan->next_tx_seq = control->reqseq;
6081 	chan->unacked_frames = 0;
6082 
6083 	if (chan->hs_hcon)
6084 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6085 	else
6086 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6087 
6088 	err = l2cap_resegment(chan);
6089 
6090 	if (!err)
6091 		err = l2cap_rx_state_recv(chan, control, skb, event);
6092 
6093 	return err;
6094 }
6095 
6096 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6097 {
6098 	/* Make sure reqseq is for a packet that has been sent but not acked */
6099 	u16 unacked;
6100 
6101 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6102 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6103 }
6104 
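/* Entry point of the ERTM receive state machine: validate reqseq against
 * the window of unacked frames and dispatch the event to the handler for
 * the current rx_state.  An invalid reqseq disconnects the channel.
 */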
6105 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6106 		    struct sk_buff *skb, u8 event)
6107 {
6108 	int err = 0;
6109 
6110 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6111 	       control, skb, event, chan->rx_state);
6112 
6113 	if (__valid_reqseq(chan, control->reqseq)) {
6114 		switch (chan->rx_state) {
6115 		case L2CAP_RX_STATE_RECV:
6116 			err = l2cap_rx_state_recv(chan, control, skb, event);
6117 			break;
6118 		case L2CAP_RX_STATE_SREJ_SENT:
6119 			err = l2cap_rx_state_srej_sent(chan, control, skb,
6120 						       event);
6121 			break;
6122 		case L2CAP_RX_STATE_WAIT_P:
6123 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
6124 			break;
6125 		case L2CAP_RX_STATE_WAIT_F:
6126 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
6127 			break;
6128 		default:
6129 			/* shut it down */
6130 			break;
6131 		}
6132 	} else {
6133 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6134 		       control->reqseq, chan->next_tx_seq,
6135 		       chan->expected_ack_seq);
6136 		l2cap_send_disconn_req(chan, ECONNRESET);
6137 	}
6138 
6139 	return err;
6140 }
6141 
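/* Streaming mode receive: in-sequence I-frames are passed to SDU
 * reassembly; out-of-sequence frames discard any partial SDU and are
 * dropped, since streaming mode never requests retransmission.
 */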
6142 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6143 			   struct sk_buff *skb)
6144 {
6145 	int err = 0;
6146 
6147 	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6148 	       chan->rx_state);
6149 
6150 	if (l2cap_classify_txseq(chan, control->txseq) ==
6151 	    L2CAP_TXSEQ_EXPECTED) {
6152 		l2cap_pass_to_tx(chan, control);
6153 
6154 		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6155 		       __next_seq(chan, chan->buffer_seq));
6156 
6157 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6158 
6159 		l2cap_reassemble_sdu(chan, skb, control);
6160 	} else {
6161 		if (chan->sdu) {
6162 			kfree_skb(chan->sdu);
6163 			chan->sdu = NULL;
6164 		}
6165 		chan->sdu_last_frag = NULL;
6166 		chan->sdu_len = 0;
6167 
6168 		if (skb) {
6169 			BT_DBG("Freeing %p", skb);
6170 			kfree_skb(skb);
6171 		}
6172 	}
6173 
6174 	chan->last_acked_seq = control->txseq;
6175 	chan->expected_tx_seq = __next_seq(chan, control->txseq);
6176 
6177 	return err;
6178 }
6179 
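/* Common receive path for ERTM and streaming mode: check the FCS and the
 * payload length against the MPS, validate the F and P bits, then feed
 * I-frames and S-frames as events into l2cap_rx() (or into
 * l2cap_stream_rx() for streaming mode).
 */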
6180 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6181 {
6182 	struct l2cap_ctrl *control = &bt_cb(skb)->control;
6183 	u16 len;
6184 	u8 event;
6185 
6186 	__unpack_control(chan, skb);
6187 
6188 	len = skb->len;
6189 
6190 	/*
6191 	 * We can just drop the corrupted I-frame here.
6192 	 * The receiver will notice the missing frame and start the
6193 	 * proper recovery procedure by asking for retransmission.
6194 	 */
6195 	if (l2cap_check_fcs(chan, skb))
6196 		goto drop;
6197 
6198 	if (!control->sframe && control->sar == L2CAP_SAR_START)
6199 		len -= L2CAP_SDULEN_SIZE;
6200 
6201 	if (chan->fcs == L2CAP_FCS_CRC16)
6202 		len -= L2CAP_FCS_SIZE;
6203 
6204 	if (len > chan->mps) {
6205 		l2cap_send_disconn_req(chan, ECONNRESET);
6206 		goto drop;
6207 	}
6208 
6209 	if (!control->sframe) {
6210 		int err;
6211 
6212 		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6213 		       control->sar, control->reqseq, control->final,
6214 		       control->txseq);
6215 
6216 		/* Validate F-bit - F=0 always valid, F=1 only
6217 		 * valid in TX WAIT_F
6218 		 */
6219 		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6220 			goto drop;
6221 
6222 		if (chan->mode != L2CAP_MODE_STREAMING) {
6223 			event = L2CAP_EV_RECV_IFRAME;
6224 			err = l2cap_rx(chan, control, skb, event);
6225 		} else {
6226 			err = l2cap_stream_rx(chan, control, skb);
6227 		}
6228 
6229 		if (err)
6230 			l2cap_send_disconn_req(chan, ECONNRESET);
6231 	} else {
6232 		const u8 rx_func_to_event[4] = {
6233 			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6234 			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6235 		};
6236 
6237 		/* Only I-frames are expected in streaming mode */
6238 		if (chan->mode == L2CAP_MODE_STREAMING)
6239 			goto drop;
6240 
6241 		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6242 		       control->reqseq, control->final, control->poll,
6243 		       control->super);
6244 
6245 		if (len != 0) {
6246 			BT_ERR("Trailing bytes: %d in sframe", len);
6247 			l2cap_send_disconn_req(chan, ECONNRESET);
6248 			goto drop;
6249 		}
6250 
6251 		/* Validate F and P bits */
6252 		if (control->final && (control->poll ||
6253 				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6254 			goto drop;
6255 
6256 		event = rx_func_to_event[control->super];
6257 		if (l2cap_rx(chan, control, skb, event))
6258 			l2cap_send_disconn_req(chan, ECONNRESET);
6259 	}
6260 
6261 	return 0;
6262 
6263 drop:
6264 	kfree_skb(skb);
6265 	return 0;
6266 }
6267 
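/* Deliver an incoming frame to the channel identified by the given CID.
 * Unknown CIDs are dropped, except L2CAP_CID_A2MP, for which an A2MP
 * channel may be created on demand.
 */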
6268 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6269 			       struct sk_buff *skb)
6270 {
6271 	struct l2cap_chan *chan;
6272 
6273 	chan = l2cap_get_chan_by_scid(conn, cid);
6274 	if (!chan) {
6275 		if (cid == L2CAP_CID_A2MP) {
6276 			chan = a2mp_channel_create(conn, skb);
6277 			if (!chan) {
6278 				kfree_skb(skb);
6279 				return;
6280 			}
6281 
6282 			l2cap_chan_lock(chan);
6283 		} else {
6284 			BT_DBG("unknown cid 0x%4.4x", cid);
6285 			/* Drop packet and return */
6286 			kfree_skb(skb);
6287 			return;
6288 		}
6289 	}
6290 
6291 	BT_DBG("chan %p, len %d", chan, skb->len);
6292 
6293 	if (chan->state != BT_CONNECTED)
6294 		goto drop;
6295 
6296 	switch (chan->mode) {
6297 	case L2CAP_MODE_BASIC:
6298 		/* If the socket recv buffer overflows we drop data here,
6299 		 * which is *bad* because L2CAP has to be reliable.
6300 		 * But we don't have any other choice: L2CAP doesn't
6301 		 * provide a flow control mechanism. */
6302 
6303 		if (chan->imtu < skb->len)
6304 			goto drop;
6305 
6306 		if (!chan->ops->recv(chan, skb))
6307 			goto done;
6308 		break;
6309 
6310 	case L2CAP_MODE_ERTM:
6311 	case L2CAP_MODE_STREAMING:
6312 		l2cap_data_rcv(chan, skb);
6313 		goto done;
6314 
6315 	default:
6316 		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6317 		break;
6318 	}
6319 
6320 drop:
6321 	kfree_skb(skb);
6322 
6323 done:
6324 	l2cap_chan_unlock(chan);
6325 }
6326 
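/* Deliver a connectionless data frame to the channel bound to the given
 * PSM, subject to its state and incoming MTU.
 */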
6327 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6328 				  struct sk_buff *skb)
6329 {
6330 	struct l2cap_chan *chan;
6331 
6332 	chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
6333 	if (!chan)
6334 		goto drop;
6335 
6336 	BT_DBG("chan %p, len %d", chan, skb->len);
6337 
6338 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6339 		goto drop;
6340 
6341 	if (chan->imtu < skb->len)
6342 		goto drop;
6343 
6344 	if (!chan->ops->recv(chan, skb))
6345 		return;
6346 
6347 drop:
6348 	kfree_skb(skb);
6349 }
6350 
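/* Deliver an incoming ATT PDU to the channel bound to the LE data CID,
 * subject to its state and incoming MTU.
 */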
6351 static void l2cap_att_channel(struct l2cap_conn *conn,
6352 			      struct sk_buff *skb)
6353 {
6354 	struct l2cap_chan *chan;
6355 
6356 	chan = l2cap_global_chan_by_scid(0, L2CAP_CID_LE_DATA,
6357 					 conn->src, conn->dst);
6358 	if (!chan)
6359 		goto drop;
6360 
6361 	BT_DBG("chan %p, len %d", chan, skb->len);
6362 
6363 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6364 		goto drop;
6365 
6366 	if (chan->imtu < skb->len)
6367 		goto drop;
6368 
6369 	if (!chan->ops->recv(chan, skb))
6370 		return;
6371 
6372 drop:
6373 	kfree_skb(skb);
6374 }
6375 
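/* Parse the Basic L2CAP header of a complete frame and demultiplex it by
 * CID: signaling, connectionless, ATT, SMP or connection-oriented data.
 */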
6376 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6377 {
6378 	struct l2cap_hdr *lh = (void *) skb->data;
6379 	u16 cid, len;
6380 	__le16 psm;
6381 
6382 	skb_pull(skb, L2CAP_HDR_SIZE);
6383 	cid = __le16_to_cpu(lh->cid);
6384 	len = __le16_to_cpu(lh->len);
6385 
6386 	if (len != skb->len) {
6387 		kfree_skb(skb);
6388 		return;
6389 	}
6390 
6391 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
6392 
6393 	switch (cid) {
6394 	case L2CAP_CID_LE_SIGNALING:
6395 	case L2CAP_CID_SIGNALING:
6396 		l2cap_sig_channel(conn, skb);
6397 		break;
6398 
6399 	case L2CAP_CID_CONN_LESS:
6400 		psm = get_unaligned((__le16 *) skb->data);
6401 		skb_pull(skb, L2CAP_PSMLEN_SIZE);
6402 		l2cap_conless_channel(conn, psm, skb);
6403 		break;
6404 
6405 	case L2CAP_CID_LE_DATA:
6406 		l2cap_att_channel(conn, skb);
6407 		break;
6408 
6409 	case L2CAP_CID_SMP:
6410 		if (smp_sig_channel(conn, skb))
6411 			l2cap_conn_del(conn->hcon, EACCES);
6412 		break;
6413 
6414 	default:
6415 		l2cap_data_channel(conn, cid, skb);
6416 		break;
6417 	}
6418 }
6419 
6420 /* ---- L2CAP interface with lower layer (HCI) ---- */
6421 
6422 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
6423 {
6424 	int exact = 0, lm1 = 0, lm2 = 0;
6425 	struct l2cap_chan *c;
6426 
6427 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
6428 
6429 	/* Find listening sockets and check their link_mode */
6430 	read_lock(&chan_list_lock);
6431 	list_for_each_entry(c, &chan_list, global_l) {
6432 		struct sock *sk = c->sk;
6433 
6434 		if (c->state != BT_LISTEN)
6435 			continue;
6436 
6437 		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
6438 			lm1 |= HCI_LM_ACCEPT;
6439 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6440 				lm1 |= HCI_LM_MASTER;
6441 			exact++;
6442 		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
6443 			lm2 |= HCI_LM_ACCEPT;
6444 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6445 				lm2 |= HCI_LM_MASTER;
6446 		}
6447 	}
6448 	read_unlock(&chan_list_lock);
6449 
6450 	return exact ? lm1 : lm2;
6451 }
6452 
6453 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6454 {
6455 	struct l2cap_conn *conn;
6456 
6457 	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6458 
6459 	if (!status) {
6460 		conn = l2cap_conn_add(hcon);
6461 		if (conn)
6462 			l2cap_conn_ready(conn);
6463 	} else {
6464 		l2cap_conn_del(hcon, bt_to_errno(status));
6465 	}
6466 }
6467 
6468 int l2cap_disconn_ind(struct hci_conn *hcon)
6469 {
6470 	struct l2cap_conn *conn = hcon->l2cap_data;
6471 
6472 	BT_DBG("hcon %p", hcon);
6473 
6474 	if (!conn)
6475 		return HCI_ERROR_REMOTE_USER_TERM;
6476 	return conn->disc_reason;
6477 }
6478 
6479 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
6480 {
6481 	BT_DBG("hcon %p reason %d", hcon, reason);
6482 
6483 	l2cap_conn_del(hcon, bt_to_errno(reason));
6484 }
6485 
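/* React to an encryption change on a connection-oriented channel: losing
 * encryption either rearms the channel timer or closes the channel,
 * depending on the configured security level.
 */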
6486 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6487 {
6488 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6489 		return;
6490 
6491 	if (encrypt == 0x00) {
6492 		if (chan->sec_level == BT_SECURITY_MEDIUM)
6493 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6494 		else if (chan->sec_level == BT_SECURITY_HIGH)
6495 			l2cap_chan_close(chan, ECONNREFUSED);
6496 	} else {
6497 		if (chan->sec_level == BT_SECURITY_MEDIUM)
6498 			__clear_chan_timer(chan);
6499 	}
6500 }
6501 
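/* HCI callback for authentication/encryption changes: walk all channels
 * on the connection and either let pending connection and configuration
 * sequences proceed or tear the channels down on failure.
 */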
6502 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
6503 {
6504 	struct l2cap_conn *conn = hcon->l2cap_data;
6505 	struct l2cap_chan *chan;
6506 
6507 	if (!conn)
6508 		return 0;
6509 
6510 	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
6511 
6512 	if (hcon->type == LE_LINK) {
6513 		if (!status && encrypt)
6514 			smp_distribute_keys(conn, 0);
6515 		cancel_delayed_work(&conn->security_timer);
6516 	}
6517 
6518 	mutex_lock(&conn->chan_lock);
6519 
6520 	list_for_each_entry(chan, &conn->chan_l, list) {
6521 		l2cap_chan_lock(chan);
6522 
6523 		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
6524 		       state_to_string(chan->state));
6525 
6526 		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
6527 			l2cap_chan_unlock(chan);
6528 			continue;
6529 		}
6530 
6531 		if (chan->scid == L2CAP_CID_LE_DATA) {
6532 			if (!status && encrypt) {
6533 				chan->sec_level = hcon->sec_level;
6534 				l2cap_chan_ready(chan);
6535 			}
6536 
6537 			l2cap_chan_unlock(chan);
6538 			continue;
6539 		}
6540 
6541 		if (!__l2cap_no_conn_pending(chan)) {
6542 			l2cap_chan_unlock(chan);
6543 			continue;
6544 		}
6545 
6546 		if (!status && (chan->state == BT_CONNECTED ||
6547 				chan->state == BT_CONFIG)) {
6548 			struct sock *sk = chan->sk;
6549 
6550 			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
6551 			sk->sk_state_change(sk);
6552 
6553 			l2cap_check_encryption(chan, encrypt);
6554 			l2cap_chan_unlock(chan);
6555 			continue;
6556 		}
6557 
6558 		if (chan->state == BT_CONNECT) {
6559 			if (!status) {
6560 				l2cap_start_connection(chan);
6561 			} else {
6562 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6563 			}
6564 		} else if (chan->state == BT_CONNECT2) {
6565 			struct sock *sk = chan->sk;
6566 			struct l2cap_conn_rsp rsp;
6567 			__u16 res, stat;
6568 
6569 			lock_sock(sk);
6570 
6571 			if (!status) {
6572 				if (test_bit(BT_SK_DEFER_SETUP,
6573 					     &bt_sk(sk)->flags)) {
6574 					res = L2CAP_CR_PEND;
6575 					stat = L2CAP_CS_AUTHOR_PEND;
6576 					chan->ops->defer(chan);
6577 				} else {
6578 					__l2cap_state_change(chan, BT_CONFIG);
6579 					res = L2CAP_CR_SUCCESS;
6580 					stat = L2CAP_CS_NO_INFO;
6581 				}
6582 			} else {
6583 				__l2cap_state_change(chan, BT_DISCONN);
6584 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6585 				res = L2CAP_CR_SEC_BLOCK;
6586 				stat = L2CAP_CS_NO_INFO;
6587 			}
6588 
6589 			release_sock(sk);
6590 
6591 			rsp.scid   = cpu_to_le16(chan->dcid);
6592 			rsp.dcid   = cpu_to_le16(chan->scid);
6593 			rsp.result = cpu_to_le16(res);
6594 			rsp.status = cpu_to_le16(stat);
6595 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
6596 				       sizeof(rsp), &rsp);
6597 
6598 			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
6599 			    res == L2CAP_CR_SUCCESS) {
6600 				char buf[128];
6601 				set_bit(CONF_REQ_SENT, &chan->conf_state);
6602 				l2cap_send_cmd(conn, l2cap_get_ident(conn),
6603 					       L2CAP_CONF_REQ,
6604 					       l2cap_build_conf_req(chan, buf),
6605 					       buf);
6606 				chan->num_conf_req++;
6607 			}
6608 		}
6609 
6610 		l2cap_chan_unlock(chan);
6611 	}
6612 
6613 	mutex_unlock(&conn->chan_lock);
6614 
6615 	return 0;
6616 }
6617 
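/* Reassemble incoming ACL data fragments into complete L2CAP frames.
 * A start fragment allocates rx_skb sized from the L2CAP header length,
 * continuation fragments are appended, and the completed frame is passed
 * to l2cap_recv_frame().
 */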
6618 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
6619 {
6620 	struct l2cap_conn *conn = hcon->l2cap_data;
6621 	struct l2cap_hdr *hdr;
6622 	int len;
6623 
6624 	/* For an AMP controller, do not create an l2cap conn */
6625 	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
6626 		goto drop;
6627 
6628 	if (!conn)
6629 		conn = l2cap_conn_add(hcon);
6630 
6631 	if (!conn)
6632 		goto drop;
6633 
6634 	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
6635 
6636 	switch (flags) {
6637 	case ACL_START:
6638 	case ACL_START_NO_FLUSH:
6639 	case ACL_COMPLETE:
6640 		if (conn->rx_len) {
6641 			BT_ERR("Unexpected start frame (len %d)", skb->len);
6642 			kfree_skb(conn->rx_skb);
6643 			conn->rx_skb = NULL;
6644 			conn->rx_len = 0;
6645 			l2cap_conn_unreliable(conn, ECOMM);
6646 		}
6647 
6648 		/* A start fragment always begins with the Basic L2CAP header */
6649 		if (skb->len < L2CAP_HDR_SIZE) {
6650 			BT_ERR("Frame is too short (len %d)", skb->len);
6651 			l2cap_conn_unreliable(conn, ECOMM);
6652 			goto drop;
6653 		}
6654 
6655 		hdr = (struct l2cap_hdr *) skb->data;
6656 		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
6657 
6658 		if (len == skb->len) {
6659 			/* Complete frame received */
6660 			l2cap_recv_frame(conn, skb);
6661 			return 0;
6662 		}
6663 
6664 		BT_DBG("Start: total len %d, frag len %d", len, skb->len);
6665 
6666 		if (skb->len > len) {
6667 			BT_ERR("Frame is too long (len %d, expected len %d)",
6668 			       skb->len, len);
6669 			l2cap_conn_unreliable(conn, ECOMM);
6670 			goto drop;
6671 		}
6672 
6673 		/* Allocate skb for the complete frame (with header) */
6674 		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
6675 		if (!conn->rx_skb)
6676 			goto drop;
6677 
6678 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6679 					  skb->len);
6680 		conn->rx_len = len - skb->len;
6681 		break;
6682 
6683 	case ACL_CONT:
6684 		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
6685 
6686 		if (!conn->rx_len) {
6687 			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
6688 			l2cap_conn_unreliable(conn, ECOMM);
6689 			goto drop;
6690 		}
6691 
6692 		if (skb->len > conn->rx_len) {
6693 			BT_ERR("Fragment is too long (len %d, expected %d)",
6694 			       skb->len, conn->rx_len);
6695 			kfree_skb(conn->rx_skb);
6696 			conn->rx_skb = NULL;
6697 			conn->rx_len = 0;
6698 			l2cap_conn_unreliable(conn, ECOMM);
6699 			goto drop;
6700 		}
6701 
6702 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6703 					  skb->len);
6704 		conn->rx_len -= skb->len;
6705 
6706 		if (!conn->rx_len) {
6707 			/* Complete frame received */
6708 			l2cap_recv_frame(conn, conn->rx_skb);
6709 			conn->rx_skb = NULL;
6710 		}
6711 		break;
6712 	}
6713 
6714 drop:
6715 	kfree_skb(skb);
6716 	return 0;
6717 }
6718 
6719 static int l2cap_debugfs_show(struct seq_file *f, void *p)
6720 {
6721 	struct l2cap_chan *c;
6722 
6723 	read_lock(&chan_list_lock);
6724 
6725 	list_for_each_entry(c, &chan_list, global_l) {
6726 		struct sock *sk = c->sk;
6727 
6728 		seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6729 			   &bt_sk(sk)->src, &bt_sk(sk)->dst,
6730 			   c->state, __le16_to_cpu(c->psm),
6731 			   c->scid, c->dcid, c->imtu, c->omtu,
6732 			   c->sec_level, c->mode);
6733 	}
6734 
6735 	read_unlock(&chan_list_lock);
6736 
6737 	return 0;
6738 }
6739 
6740 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
6741 {
6742 	return single_open(file, l2cap_debugfs_show, inode->i_private);
6743 }
6744 
6745 static const struct file_operations l2cap_debugfs_fops = {
6746 	.open		= l2cap_debugfs_open,
6747 	.read		= seq_read,
6748 	.llseek		= seq_lseek,
6749 	.release	= single_release,
6750 };
6751 
6752 static struct dentry *l2cap_debugfs;
6753 
6754 int __init l2cap_init(void)
6755 {
6756 	int err;
6757 
6758 	err = l2cap_init_sockets();
6759 	if (err < 0)
6760 		return err;
6761 
6762 	if (bt_debugfs) {
6763 		l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6764 						    NULL, &l2cap_debugfs_fops);
6765 		if (!l2cap_debugfs)
6766 			BT_ERR("Failed to create L2CAP debug file");
6767 	}
6768 
6769 	return 0;
6770 }
6771 
6772 void l2cap_exit(void)
6773 {
6774 	debugfs_remove(l2cap_debugfs);
6775 	l2cap_cleanup_sockets();
6776 }
6777 
6778 module_param(disable_ertm, bool, 0644);
6779 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
6780