xref: /linux/net/bluetooth/l2cap_core.c (revision 0d456bad36d42d16022be045c8a53ddbb59ee478)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
41 #include <net/bluetooth/amp.h>
42 
43 bool disable_ertm;
44 
45 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
46 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
47 
48 static LIST_HEAD(chan_list);
49 static DEFINE_RWLOCK(chan_list_lock);
50 
51 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
52 				       u8 code, u8 ident, u16 dlen, void *data);
53 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
54 			   void *data);
55 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
56 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
57 
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 		     struct sk_buff_head *skbs, u8 event);
60 
61 /* ---- L2CAP channels ---- */
62 
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
64 						   u16 cid)
65 {
66 	struct l2cap_chan *c;
67 
68 	list_for_each_entry(c, &conn->chan_l, list) {
69 		if (c->dcid == cid)
70 			return c;
71 	}
72 	return NULL;
73 }
74 
75 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
76 						   u16 cid)
77 {
78 	struct l2cap_chan *c;
79 
80 	list_for_each_entry(c, &conn->chan_l, list) {
81 		if (c->scid == cid)
82 			return c;
83 	}
84 	return NULL;
85 }
86 
87 /* Find channel with given SCID.
88  * Returns locked channel. */
89 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
90 						 u16 cid)
91 {
92 	struct l2cap_chan *c;
93 
94 	mutex_lock(&conn->chan_lock);
95 	c = __l2cap_get_chan_by_scid(conn, cid);
96 	if (c)
97 		l2cap_chan_lock(c);
98 	mutex_unlock(&conn->chan_lock);
99 
100 	return c;
101 }
102 
103 /* Find channel with given DCID.
104  * Returns locked channel.
105  */
106 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
107 						 u16 cid)
108 {
109 	struct l2cap_chan *c;
110 
111 	mutex_lock(&conn->chan_lock);
112 	c = __l2cap_get_chan_by_dcid(conn, cid);
113 	if (c)
114 		l2cap_chan_lock(c);
115 	mutex_unlock(&conn->chan_lock);
116 
117 	return c;
118 }
119 
120 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
121 						    u8 ident)
122 {
123 	struct l2cap_chan *c;
124 
125 	list_for_each_entry(c, &conn->chan_l, list) {
126 		if (c->ident == ident)
127 			return c;
128 	}
129 	return NULL;
130 }
131 
132 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
133 						  u8 ident)
134 {
135 	struct l2cap_chan *c;
136 
137 	mutex_lock(&conn->chan_lock);
138 	c = __l2cap_get_chan_by_ident(conn, ident);
139 	if (c)
140 		l2cap_chan_lock(c);
141 	mutex_unlock(&conn->chan_lock);
142 
143 	return c;
144 }
145 
146 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
147 {
148 	struct l2cap_chan *c;
149 
150 	list_for_each_entry(c, &chan_list, global_l) {
151 		if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
152 			return c;
153 	}
154 	return NULL;
155 }
156 
157 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
158 {
159 	int err;
160 
161 	write_lock(&chan_list_lock);
162 
163 	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
164 		err = -EADDRINUSE;
165 		goto done;
166 	}
167 
168 	if (psm) {
169 		chan->psm = psm;
170 		chan->sport = psm;
171 		err = 0;
172 	} else {
173 		u16 p;
174 
175 		err = -EINVAL;
176 		for (p = 0x1001; p < 0x1100; p += 2)
177 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
178 				chan->psm   = cpu_to_le16(p);
179 				chan->sport = cpu_to_le16(p);
180 				err = 0;
181 				break;
182 			}
183 	}
184 
185 done:
186 	write_unlock(&chan_list_lock);
187 	return err;
188 }
189 
190 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
191 {
192 	write_lock(&chan_list_lock);
193 
194 	chan->scid = scid;
195 
196 	write_unlock(&chan_list_lock);
197 
198 	return 0;
199 }
200 
201 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
202 {
203 	u16 cid = L2CAP_CID_DYN_START;
204 
205 	for (; cid < L2CAP_CID_DYN_END; cid++) {
206 		if (!__l2cap_get_chan_by_scid(conn, cid))
207 			return cid;
208 	}
209 
210 	return 0;
211 }
212 
213 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
214 {
215 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
216 	       state_to_string(state));
217 
218 	chan->state = state;
219 	chan->ops->state_change(chan, state);
220 }
221 
222 static void l2cap_state_change(struct l2cap_chan *chan, int state)
223 {
224 	struct sock *sk = chan->sk;
225 
226 	lock_sock(sk);
227 	__l2cap_state_change(chan, state);
228 	release_sock(sk);
229 }
230 
231 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
232 {
233 	struct sock *sk = chan->sk;
234 
235 	sk->sk_err = err;
236 }
237 
238 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
239 {
240 	struct sock *sk = chan->sk;
241 
242 	lock_sock(sk);
243 	__l2cap_chan_set_err(chan, err);
244 	release_sock(sk);
245 }
246 
247 static void __set_retrans_timer(struct l2cap_chan *chan)
248 {
249 	if (!delayed_work_pending(&chan->monitor_timer) &&
250 	    chan->retrans_timeout) {
251 		l2cap_set_timer(chan, &chan->retrans_timer,
252 				msecs_to_jiffies(chan->retrans_timeout));
253 	}
254 }
255 
256 static void __set_monitor_timer(struct l2cap_chan *chan)
257 {
258 	__clear_retrans_timer(chan);
259 	if (chan->monitor_timeout) {
260 		l2cap_set_timer(chan, &chan->monitor_timer,
261 				msecs_to_jiffies(chan->monitor_timeout));
262 	}
263 }
264 
265 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
266 					       u16 seq)
267 {
268 	struct sk_buff *skb;
269 
270 	skb_queue_walk(head, skb) {
271 		if (bt_cb(skb)->control.txseq == seq)
272 			return skb;
273 	}
274 
275 	return NULL;
276 }
277 
278 /* ---- L2CAP sequence number lists ---- */
279 
280 /* For ERTM, ordered lists of sequence numbers must be tracked for
281  * SREJ requests that are received and for frames that are to be
282  * retransmitted. These seq_list functions implement a singly-linked
283  * list in an array, where membership in the list can also be checked
284  * in constant time. Items can also be added to the tail of the list
285  * and removed from the head in constant time, without further memory
286  * allocs or frees.
287  */
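
/* Usage sketch (illustrative, not part of the original file): the backing
 * array is indexed by (seq & mask), so membership tests, tail appends and
 * head pops are all constant time.  Popping below returns 5 (the current
 * head) and advances the head to 9:
 *
 *	struct l2cap_seq_list list;
 *	u16 seq;
 *
 *	l2cap_seq_list_init(&list, chan->tx_win);
 *	l2cap_seq_list_append(&list, 5);
 *	l2cap_seq_list_append(&list, 9);
 *	if (l2cap_seq_list_contains(&list, 5))
 *		seq = l2cap_seq_list_pop(&list);
 *	l2cap_seq_list_free(&list);
 */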
288 
289 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
290 {
291 	size_t alloc_size, i;
292 
293 	/* Allocated size is a power of 2 to map sequence numbers
294 	 * (which may be up to 14 bits) into a smaller array that is
295 	 * sized for the negotiated ERTM transmit window.
296 	 */
297 	alloc_size = roundup_pow_of_two(size);
298 
299 	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
300 	if (!seq_list->list)
301 		return -ENOMEM;
302 
303 	seq_list->mask = alloc_size - 1;
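304 
	/* For example, a negotiated window of 63 rounds up to an alloc_size
	 * of 64, giving mask = 0x3f, so txseq 100 maps to slot 100 & 0x3f = 36.
	 */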
304 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
305 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 	for (i = 0; i < alloc_size; i++)
307 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
308 
309 	return 0;
310 }
311 
312 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
313 {
314 	kfree(seq_list->list);
315 }
316 
317 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
318 					   u16 seq)
319 {
320 	/* Constant-time check for list membership */
321 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
322 }
323 
324 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
325 {
326 	u16 mask = seq_list->mask;
327 
328 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
329 		/* In case someone tries to pop the head of an empty list */
330 		return L2CAP_SEQ_LIST_CLEAR;
331 	} else if (seq_list->head == seq) {
332 		/* Head can be removed in constant time */
333 		seq_list->head = seq_list->list[seq & mask];
334 		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
335 
336 		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
337 			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
338 			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
339 		}
340 	} else {
341 		/* Walk the list to find the sequence number */
342 		u16 prev = seq_list->head;
343 		while (seq_list->list[prev & mask] != seq) {
344 			prev = seq_list->list[prev & mask];
345 			if (prev == L2CAP_SEQ_LIST_TAIL)
346 				return L2CAP_SEQ_LIST_CLEAR;
347 		}
348 
349 		/* Unlink the number from the list and clear it */
350 		seq_list->list[prev & mask] = seq_list->list[seq & mask];
351 		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
352 		if (seq_list->tail == seq)
353 			seq_list->tail = prev;
354 	}
355 	return seq;
356 }
357 
358 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
359 {
360 	/* Remove the head in constant time */
361 	return l2cap_seq_list_remove(seq_list, seq_list->head);
362 }
363 
364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
365 {
366 	u16 i;
367 
368 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
369 		return;
370 
371 	for (i = 0; i <= seq_list->mask; i++)
372 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
373 
374 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
376 }
377 
378 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
379 {
380 	u16 mask = seq_list->mask;
381 
382 	/* All appends happen in constant time */
383 
384 	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
385 		return;
386 
387 	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
388 		seq_list->head = seq;
389 	else
390 		seq_list->list[seq_list->tail & mask] = seq;
391 
392 	seq_list->tail = seq;
393 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
394 }
395 
396 static void l2cap_chan_timeout(struct work_struct *work)
397 {
398 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
399 					       chan_timer.work);
400 	struct l2cap_conn *conn = chan->conn;
401 	int reason;
402 
403 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
404 
405 	mutex_lock(&conn->chan_lock);
406 	l2cap_chan_lock(chan);
407 
408 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
409 		reason = ECONNREFUSED;
410 	else if (chan->state == BT_CONNECT &&
411 		 chan->sec_level != BT_SECURITY_SDP)
412 		reason = ECONNREFUSED;
413 	else
414 		reason = ETIMEDOUT;
415 
416 	l2cap_chan_close(chan, reason);
417 
418 	l2cap_chan_unlock(chan);
419 
420 	chan->ops->close(chan);
421 	mutex_unlock(&conn->chan_lock);
422 
423 	l2cap_chan_put(chan);
424 }
425 
426 struct l2cap_chan *l2cap_chan_create(void)
427 {
428 	struct l2cap_chan *chan;
429 
430 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
431 	if (!chan)
432 		return NULL;
433 
434 	mutex_init(&chan->lock);
435 
436 	write_lock(&chan_list_lock);
437 	list_add(&chan->global_l, &chan_list);
438 	write_unlock(&chan_list_lock);
439 
440 	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
441 
442 	chan->state = BT_OPEN;
443 
444 	kref_init(&chan->kref);
445 
446 	/* This flag is cleared in l2cap_chan_ready() */
447 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
448 
449 	BT_DBG("chan %p", chan);
450 
451 	return chan;
452 }
453 
454 static void l2cap_chan_destroy(struct kref *kref)
455 {
456 	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
457 
458 	BT_DBG("chan %p", chan);
459 
460 	write_lock(&chan_list_lock);
461 	list_del(&chan->global_l);
462 	write_unlock(&chan_list_lock);
463 
464 	kfree(chan);
465 }
466 
467 void l2cap_chan_hold(struct l2cap_chan *c)
468 {
469 	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
470 
471 	kref_get(&c->kref);
472 }
473 
474 void l2cap_chan_put(struct l2cap_chan *c)
475 {
476 	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
477 
478 	kref_put(&c->kref, l2cap_chan_destroy);
479 }
480 
481 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
482 {
483 	chan->fcs  = L2CAP_FCS_CRC16;
484 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
485 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
486 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
487 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
488 	chan->sec_level = BT_SECURITY_LOW;
489 
490 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
491 }
492 
493 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
494 {
495 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
496 	       __le16_to_cpu(chan->psm), chan->dcid);
497 
498 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
499 
500 	chan->conn = conn;
501 
502 	switch (chan->chan_type) {
503 	case L2CAP_CHAN_CONN_ORIENTED:
504 		if (conn->hcon->type == LE_LINK) {
505 			/* LE connection */
506 			chan->omtu = L2CAP_DEFAULT_MTU;
507 			chan->scid = L2CAP_CID_LE_DATA;
508 			chan->dcid = L2CAP_CID_LE_DATA;
509 		} else {
510 			/* Alloc CID for connection-oriented socket */
511 			chan->scid = l2cap_alloc_cid(conn);
512 			chan->omtu = L2CAP_DEFAULT_MTU;
513 		}
514 		break;
515 
516 	case L2CAP_CHAN_CONN_LESS:
517 		/* Connectionless socket */
518 		chan->scid = L2CAP_CID_CONN_LESS;
519 		chan->dcid = L2CAP_CID_CONN_LESS;
520 		chan->omtu = L2CAP_DEFAULT_MTU;
521 		break;
522 
523 	case L2CAP_CHAN_CONN_FIX_A2MP:
524 		chan->scid = L2CAP_CID_A2MP;
525 		chan->dcid = L2CAP_CID_A2MP;
526 		chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
527 		chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
528 		break;
529 
530 	default:
531 		/* Raw socket can send/recv signalling messages only */
532 		chan->scid = L2CAP_CID_SIGNALING;
533 		chan->dcid = L2CAP_CID_SIGNALING;
534 		chan->omtu = L2CAP_DEFAULT_MTU;
535 	}
536 
537 	chan->local_id		= L2CAP_BESTEFFORT_ID;
538 	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
539 	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
540 	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
541 	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
542 	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;
543 
544 	l2cap_chan_hold(chan);
545 
546 	list_add(&chan->list, &conn->chan_l);
547 }
548 
549 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
550 {
551 	mutex_lock(&conn->chan_lock);
552 	__l2cap_chan_add(conn, chan);
553 	mutex_unlock(&conn->chan_lock);
554 }
555 
556 void l2cap_chan_del(struct l2cap_chan *chan, int err)
557 {
558 	struct l2cap_conn *conn = chan->conn;
559 
560 	__clear_chan_timer(chan);
561 
562 	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
563 
564 	if (conn) {
565 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
566 		/* Delete from channel list */
567 		list_del(&chan->list);
568 
569 		l2cap_chan_put(chan);
570 
571 		chan->conn = NULL;
572 
573 		if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
574 			hci_conn_put(conn->hcon);
575 
576 		if (mgr && mgr->bredr_chan == chan)
577 			mgr->bredr_chan = NULL;
578 	}
579 
580 	if (chan->hs_hchan) {
581 		struct hci_chan *hs_hchan = chan->hs_hchan;
582 
583 		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
584 		amp_disconnect_logical_link(hs_hchan);
585 	}
586 
587 	chan->ops->teardown(chan, err);
588 
589 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
590 		return;
591 
592 	switch (chan->mode) {
593 	case L2CAP_MODE_BASIC:
594 		break;
595 
596 	case L2CAP_MODE_ERTM:
597 		__clear_retrans_timer(chan);
598 		__clear_monitor_timer(chan);
599 		__clear_ack_timer(chan);
600 
601 		skb_queue_purge(&chan->srej_q);
602 
603 		l2cap_seq_list_free(&chan->srej_list);
604 		l2cap_seq_list_free(&chan->retrans_list);
605 
606 		/* fall through */
607 
608 	case L2CAP_MODE_STREAMING:
609 		skb_queue_purge(&chan->tx_q);
610 		break;
611 	}
612 
613 	return;
614 }
615 
616 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
617 {
618 	struct l2cap_conn *conn = chan->conn;
619 	struct sock *sk = chan->sk;
620 
621 	BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
622 	       sk);
623 
624 	switch (chan->state) {
625 	case BT_LISTEN:
626 		chan->ops->teardown(chan, 0);
627 		break;
628 
629 	case BT_CONNECTED:
630 	case BT_CONFIG:
631 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
632 		    conn->hcon->type == ACL_LINK) {
633 			__set_chan_timer(chan, sk->sk_sndtimeo);
634 			l2cap_send_disconn_req(chan, reason);
635 		} else
636 			l2cap_chan_del(chan, reason);
637 		break;
638 
639 	case BT_CONNECT2:
640 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
641 		    conn->hcon->type == ACL_LINK) {
642 			struct l2cap_conn_rsp rsp;
643 			__u16 result;
644 
645 			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
646 				result = L2CAP_CR_SEC_BLOCK;
647 			else
648 				result = L2CAP_CR_BAD_PSM;
649 			l2cap_state_change(chan, BT_DISCONN);
650 
651 			rsp.scid   = cpu_to_le16(chan->dcid);
652 			rsp.dcid   = cpu_to_le16(chan->scid);
653 			rsp.result = cpu_to_le16(result);
654 			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
655 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
656 				       sizeof(rsp), &rsp);
657 		}
658 
659 		l2cap_chan_del(chan, reason);
660 		break;
661 
662 	case BT_CONNECT:
663 	case BT_DISCONN:
664 		l2cap_chan_del(chan, reason);
665 		break;
666 
667 	default:
668 		chan->ops->teardown(chan, 0);
669 		break;
670 	}
671 }
672 
673 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
674 {
675 	if (chan->chan_type == L2CAP_CHAN_RAW) {
676 		switch (chan->sec_level) {
677 		case BT_SECURITY_HIGH:
678 			return HCI_AT_DEDICATED_BONDING_MITM;
679 		case BT_SECURITY_MEDIUM:
680 			return HCI_AT_DEDICATED_BONDING;
681 		default:
682 			return HCI_AT_NO_BONDING;
683 		}
684 	} else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
685 		if (chan->sec_level == BT_SECURITY_LOW)
686 			chan->sec_level = BT_SECURITY_SDP;
687 
688 		if (chan->sec_level == BT_SECURITY_HIGH)
689 			return HCI_AT_NO_BONDING_MITM;
690 		else
691 			return HCI_AT_NO_BONDING;
692 	} else {
693 		switch (chan->sec_level) {
694 		case BT_SECURITY_HIGH:
695 			return HCI_AT_GENERAL_BONDING_MITM;
696 		case BT_SECURITY_MEDIUM:
697 			return HCI_AT_GENERAL_BONDING;
698 		default:
699 			return HCI_AT_NO_BONDING;
700 		}
701 	}
702 }
703 
704 /* Service level security */
705 int l2cap_chan_check_security(struct l2cap_chan *chan)
706 {
707 	struct l2cap_conn *conn = chan->conn;
708 	__u8 auth_type;
709 
710 	auth_type = l2cap_get_auth_type(chan);
711 
712 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
713 }
714 
715 static u8 l2cap_get_ident(struct l2cap_conn *conn)
716 {
717 	u8 id;
718 
719 	/* Get next available identifier.
720 	 *    1 - 128 are used by kernel.
721 	 *  129 - 199 are reserved.
722 	 *  200 - 254 are used by utilities like l2ping, etc.
723 	 */
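	/* For example, once tx_ident has reached 128 the next call wraps it
	 * back to 1; ident 0 is never handed out.
	 */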
724 
725 	spin_lock(&conn->lock);
726 
727 	if (++conn->tx_ident > 128)
728 		conn->tx_ident = 1;
729 
730 	id = conn->tx_ident;
731 
732 	spin_unlock(&conn->lock);
733 
734 	return id;
735 }
736 
737 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
738 			   void *data)
739 {
740 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
741 	u8 flags;
742 
743 	BT_DBG("code 0x%2.2x", code);
744 
745 	if (!skb)
746 		return;
747 
748 	if (lmp_no_flush_capable(conn->hcon->hdev))
749 		flags = ACL_START_NO_FLUSH;
750 	else
751 		flags = ACL_START;
752 
753 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
754 	skb->priority = HCI_PRIO_MAX;
755 
756 	hci_send_acl(conn->hchan, skb, flags);
757 }
758 
759 static bool __chan_is_moving(struct l2cap_chan *chan)
760 {
761 	return chan->move_state != L2CAP_MOVE_STABLE &&
762 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
763 }
764 
765 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
766 {
767 	struct hci_conn *hcon = chan->conn->hcon;
768 	u16 flags;
769 
770 	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
771 	       skb->priority);
772 
773 	if (chan->hs_hcon && !__chan_is_moving(chan)) {
774 		if (chan->hs_hchan)
775 			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
776 		else
777 			kfree_skb(skb);
778 
779 		return;
780 	}
781 
782 	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
783 	    lmp_no_flush_capable(hcon->hdev))
784 		flags = ACL_START_NO_FLUSH;
785 	else
786 		flags = ACL_START;
787 
788 	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
789 	hci_send_acl(chan->conn->hchan, skb, flags);
790 }
791 
792 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
793 {
794 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
795 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
796 
797 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
798 		/* S-Frame */
799 		control->sframe = 1;
800 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
801 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
802 
803 		control->sar = 0;
804 		control->txseq = 0;
805 	} else {
806 		/* I-Frame */
807 		control->sframe = 0;
808 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
809 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
810 
811 		control->poll = 0;
812 		control->super = 0;
813 	}
814 }
815 
816 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
817 {
818 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
819 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
820 
821 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
822 		/* S-Frame */
823 		control->sframe = 1;
824 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
825 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
826 
827 		control->sar = 0;
828 		control->txseq = 0;
829 	} else {
830 		/* I-Frame */
831 		control->sframe = 0;
832 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
833 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
834 
835 		control->poll = 0;
836 		control->super = 0;
837 	}
838 }
839 
840 static inline void __unpack_control(struct l2cap_chan *chan,
841 				    struct sk_buff *skb)
842 {
843 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
844 		__unpack_extended_control(get_unaligned_le32(skb->data),
845 					  &bt_cb(skb)->control);
846 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
847 	} else {
848 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
849 					  &bt_cb(skb)->control);
850 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
851 	}
852 }
853 
854 static u32 __pack_extended_control(struct l2cap_ctrl *control)
855 {
856 	u32 packed;
857 
858 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
859 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
860 
861 	if (control->sframe) {
862 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
863 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
864 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
865 	} else {
866 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
867 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
868 	}
869 
870 	return packed;
871 }
872 
873 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
874 {
875 	u16 packed;
876 
877 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
878 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
879 
880 	if (control->sframe) {
881 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
882 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
883 		packed |= L2CAP_CTRL_FRAME_TYPE;
884 	} else {
885 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
886 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
887 	}
888 
889 	return packed;
890 }
891 
892 static inline void __pack_control(struct l2cap_chan *chan,
893 				  struct l2cap_ctrl *control,
894 				  struct sk_buff *skb)
895 {
896 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
897 		put_unaligned_le32(__pack_extended_control(control),
898 				   skb->data + L2CAP_HDR_SIZE);
899 	} else {
900 		put_unaligned_le16(__pack_enhanced_control(control),
901 				   skb->data + L2CAP_HDR_SIZE);
902 	}
903 }
904 
905 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
906 {
907 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
908 		return L2CAP_EXT_HDR_SIZE;
909 	else
910 		return L2CAP_ENH_HDR_SIZE;
911 }
912 
913 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
914 					       u32 control)
915 {
916 	struct sk_buff *skb;
917 	struct l2cap_hdr *lh;
918 	int hlen = __ertm_hdr_size(chan);
919 
920 	if (chan->fcs == L2CAP_FCS_CRC16)
921 		hlen += L2CAP_FCS_SIZE;
922 
923 	skb = bt_skb_alloc(hlen, GFP_KERNEL);
924 
925 	if (!skb)
926 		return ERR_PTR(-ENOMEM);
927 
928 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
929 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
930 	lh->cid = cpu_to_le16(chan->dcid);
931 
932 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
933 		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
934 	else
935 		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
936 
937 	if (chan->fcs == L2CAP_FCS_CRC16) {
938 		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
939 		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
940 	}
941 
942 	skb->priority = HCI_PRIO_MAX;
943 	return skb;
944 }
945 
946 static void l2cap_send_sframe(struct l2cap_chan *chan,
947 			      struct l2cap_ctrl *control)
948 {
949 	struct sk_buff *skb;
950 	u32 control_field;
951 
952 	BT_DBG("chan %p, control %p", chan, control);
953 
954 	if (!control->sframe)
955 		return;
956 
957 	if (__chan_is_moving(chan))
958 		return;
959 
960 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
961 	    !control->poll)
962 		control->final = 1;
963 
964 	if (control->super == L2CAP_SUPER_RR)
965 		clear_bit(CONN_RNR_SENT, &chan->conn_state);
966 	else if (control->super == L2CAP_SUPER_RNR)
967 		set_bit(CONN_RNR_SENT, &chan->conn_state);
968 
969 	if (control->super != L2CAP_SUPER_SREJ) {
970 		chan->last_acked_seq = control->reqseq;
971 		__clear_ack_timer(chan);
972 	}
973 
974 	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
975 	       control->final, control->poll, control->super);
976 
977 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
978 		control_field = __pack_extended_control(control);
979 	else
980 		control_field = __pack_enhanced_control(control);
981 
982 	skb = l2cap_create_sframe_pdu(chan, control_field);
983 	if (!IS_ERR(skb))
984 		l2cap_do_send(chan, skb);
985 }
986 
987 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
988 {
989 	struct l2cap_ctrl control;
990 
991 	BT_DBG("chan %p, poll %d", chan, poll);
992 
993 	memset(&control, 0, sizeof(control));
994 	control.sframe = 1;
995 	control.poll = poll;
996 
997 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
998 		control.super = L2CAP_SUPER_RNR;
999 	else
1000 		control.super = L2CAP_SUPER_RR;
1001 
1002 	control.reqseq = chan->buffer_seq;
1003 	l2cap_send_sframe(chan, &control);
1004 }
1005 
1006 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1007 {
1008 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1009 }
1010 
1011 static bool __amp_capable(struct l2cap_chan *chan)
1012 {
1013 	struct l2cap_conn *conn = chan->conn;
1014 
1015 	if (enable_hs &&
1016 	    hci_amp_capable() &&
1017 	    chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED &&
1018 	    conn->fixed_chan_mask & L2CAP_FC_A2MP)
1019 		return true;
1020 	else
1021 		return false;
1022 }
1023 
1024 static bool l2cap_check_efs(struct l2cap_chan *chan)
1025 {
1026 	/* Check EFS parameters: nothing is validated yet, so any proposed EFS is accepted */
1027 	return true;
1028 }
1029 
1030 void l2cap_send_conn_req(struct l2cap_chan *chan)
1031 {
1032 	struct l2cap_conn *conn = chan->conn;
1033 	struct l2cap_conn_req req;
1034 
1035 	req.scid = cpu_to_le16(chan->scid);
1036 	req.psm  = chan->psm;
1037 
1038 	chan->ident = l2cap_get_ident(conn);
1039 
1040 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1041 
1042 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1043 }
1044 
1045 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1046 {
1047 	struct l2cap_create_chan_req req;
1048 	req.scid = cpu_to_le16(chan->scid);
1049 	req.psm  = chan->psm;
1050 	req.amp_id = amp_id;
1051 
1052 	chan->ident = l2cap_get_ident(chan->conn);
1053 
1054 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1055 		       sizeof(req), &req);
1056 }
1057 
1058 static void l2cap_move_setup(struct l2cap_chan *chan)
1059 {
1060 	struct sk_buff *skb;
1061 
1062 	BT_DBG("chan %p", chan);
1063 
1064 	if (chan->mode != L2CAP_MODE_ERTM)
1065 		return;
1066 
1067 	__clear_retrans_timer(chan);
1068 	__clear_monitor_timer(chan);
1069 	__clear_ack_timer(chan);
1070 
1071 	chan->retry_count = 0;
1072 	skb_queue_walk(&chan->tx_q, skb) {
1073 		if (bt_cb(skb)->control.retries)
1074 			bt_cb(skb)->control.retries = 1;
1075 		else
1076 			break;
1077 	}
1078 
1079 	chan->expected_tx_seq = chan->buffer_seq;
1080 
1081 	clear_bit(CONN_REJ_ACT, &chan->conn_state);
1082 	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1083 	l2cap_seq_list_clear(&chan->retrans_list);
1084 	l2cap_seq_list_clear(&chan->srej_list);
1085 	skb_queue_purge(&chan->srej_q);
1086 
1087 	chan->tx_state = L2CAP_TX_STATE_XMIT;
1088 	chan->rx_state = L2CAP_RX_STATE_MOVE;
1089 
1090 	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1091 }
1092 
1093 static void l2cap_move_done(struct l2cap_chan *chan)
1094 {
1095 	u8 move_role = chan->move_role;
1096 	BT_DBG("chan %p", chan);
1097 
1098 	chan->move_state = L2CAP_MOVE_STABLE;
1099 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1100 
1101 	if (chan->mode != L2CAP_MODE_ERTM)
1102 		return;
1103 
1104 	switch (move_role) {
1105 	case L2CAP_MOVE_ROLE_INITIATOR:
1106 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1107 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1108 		break;
1109 	case L2CAP_MOVE_ROLE_RESPONDER:
1110 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1111 		break;
1112 	}
1113 }
1114 
1115 static void l2cap_chan_ready(struct l2cap_chan *chan)
1116 {
1117 	/* This clears all conf flags, including CONF_NOT_COMPLETE */
1118 	chan->conf_state = 0;
1119 	__clear_chan_timer(chan);
1120 
1121 	chan->state = BT_CONNECTED;
1122 
1123 	chan->ops->ready(chan);
1124 }
1125 
1126 static void l2cap_start_connection(struct l2cap_chan *chan)
1127 {
1128 	if (__amp_capable(chan)) {
1129 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1130 		a2mp_discover_amp(chan);
1131 	} else {
1132 		l2cap_send_conn_req(chan);
1133 	}
1134 }
1135 
1136 static void l2cap_do_start(struct l2cap_chan *chan)
1137 {
1138 	struct l2cap_conn *conn = chan->conn;
1139 
1140 	if (conn->hcon->type == LE_LINK) {
1141 		l2cap_chan_ready(chan);
1142 		return;
1143 	}
1144 
1145 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1146 		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1147 			return;
1148 
1149 		if (l2cap_chan_check_security(chan) &&
1150 		    __l2cap_no_conn_pending(chan)) {
1151 			l2cap_start_connection(chan);
1152 		}
1153 	} else {
1154 		struct l2cap_info_req req;
1155 		req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1156 
1157 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1158 		conn->info_ident = l2cap_get_ident(conn);
1159 
1160 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1161 
1162 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1163 			       sizeof(req), &req);
1164 	}
1165 }
1166 
1167 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1168 {
1169 	u32 local_feat_mask = l2cap_feat_mask;
1170 	if (!disable_ertm)
1171 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1172 
1173 	switch (mode) {
1174 	case L2CAP_MODE_ERTM:
1175 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1176 	case L2CAP_MODE_STREAMING:
1177 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1178 	default:
1179 		return 0x00;
1180 	}
1181 }
1182 
1183 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1184 {
1185 	struct sock *sk = chan->sk;
1186 	struct l2cap_conn *conn = chan->conn;
1187 	struct l2cap_disconn_req req;
1188 
1189 	if (!conn)
1190 		return;
1191 
1192 	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1193 		__clear_retrans_timer(chan);
1194 		__clear_monitor_timer(chan);
1195 		__clear_ack_timer(chan);
1196 	}
1197 
1198 	if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1199 		l2cap_state_change(chan, BT_DISCONN);
1200 		return;
1201 	}
1202 
1203 	req.dcid = cpu_to_le16(chan->dcid);
1204 	req.scid = cpu_to_le16(chan->scid);
1205 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1206 		       sizeof(req), &req);
1207 
1208 	lock_sock(sk);
1209 	__l2cap_state_change(chan, BT_DISCONN);
1210 	__l2cap_chan_set_err(chan, err);
1211 	release_sock(sk);
1212 }
1213 
1214 /* ---- L2CAP connections ---- */
1215 static void l2cap_conn_start(struct l2cap_conn *conn)
1216 {
1217 	struct l2cap_chan *chan, *tmp;
1218 
1219 	BT_DBG("conn %p", conn);
1220 
1221 	mutex_lock(&conn->chan_lock);
1222 
1223 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1224 		struct sock *sk = chan->sk;
1225 
1226 		l2cap_chan_lock(chan);
1227 
1228 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1229 			l2cap_chan_unlock(chan);
1230 			continue;
1231 		}
1232 
1233 		if (chan->state == BT_CONNECT) {
1234 			if (!l2cap_chan_check_security(chan) ||
1235 			    !__l2cap_no_conn_pending(chan)) {
1236 				l2cap_chan_unlock(chan);
1237 				continue;
1238 			}
1239 
1240 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1241 			    && test_bit(CONF_STATE2_DEVICE,
1242 					&chan->conf_state)) {
1243 				l2cap_chan_close(chan, ECONNRESET);
1244 				l2cap_chan_unlock(chan);
1245 				continue;
1246 			}
1247 
1248 			l2cap_start_connection(chan);
1249 
1250 		} else if (chan->state == BT_CONNECT2) {
1251 			struct l2cap_conn_rsp rsp;
1252 			char buf[128];
1253 			rsp.scid = cpu_to_le16(chan->dcid);
1254 			rsp.dcid = cpu_to_le16(chan->scid);
1255 
1256 			if (l2cap_chan_check_security(chan)) {
1257 				lock_sock(sk);
1258 				if (test_bit(BT_SK_DEFER_SETUP,
1259 					     &bt_sk(sk)->flags)) {
1260 					rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1261 					rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1262 					chan->ops->defer(chan);
1263 
1264 				} else {
1265 					__l2cap_state_change(chan, BT_CONFIG);
1266 					rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1267 					rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1268 				}
1269 				release_sock(sk);
1270 			} else {
1271 				rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1272 				rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1273 			}
1274 
1275 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1276 				       sizeof(rsp), &rsp);
1277 
1278 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1279 			    rsp.result != L2CAP_CR_SUCCESS) {
1280 				l2cap_chan_unlock(chan);
1281 				continue;
1282 			}
1283 
1284 			set_bit(CONF_REQ_SENT, &chan->conf_state);
1285 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1286 				       l2cap_build_conf_req(chan, buf), buf);
1287 			chan->num_conf_req++;
1288 		}
1289 
1290 		l2cap_chan_unlock(chan);
1291 	}
1292 
1293 	mutex_unlock(&conn->chan_lock);
1294 }
1295 
1296 /* Find channel with given CID and source/destination bdaddr.
1297  * Returns closest match.
1298  */
1299 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1300 						    bdaddr_t *src,
1301 						    bdaddr_t *dst)
1302 {
1303 	struct l2cap_chan *c, *c1 = NULL;
1304 
1305 	read_lock(&chan_list_lock);
1306 
1307 	list_for_each_entry(c, &chan_list, global_l) {
1308 		struct sock *sk = c->sk;
1309 
1310 		if (state && c->state != state)
1311 			continue;
1312 
1313 		if (c->scid == cid) {
1314 			int src_match, dst_match;
1315 			int src_any, dst_any;
1316 
1317 			/* Exact match. */
1318 			src_match = !bacmp(&bt_sk(sk)->src, src);
1319 			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1320 			if (src_match && dst_match) {
1321 				read_unlock(&chan_list_lock);
1322 				return c;
1323 			}
1324 
1325 			/* Closest match */
1326 			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1327 			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1328 			if ((src_match && dst_any) || (src_any && dst_match) ||
1329 			    (src_any && dst_any))
1330 				c1 = c;
1331 		}
1332 	}
1333 
1334 	read_unlock(&chan_list_lock);
1335 
1336 	return c1;
1337 }
1338 
1339 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1340 {
1341 	struct sock *parent, *sk;
1342 	struct l2cap_chan *chan, *pchan;
1343 
1344 	BT_DBG("");
1345 
1346 	/* Check if we have socket listening on cid */
1347 	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1348 					  conn->src, conn->dst);
1349 	if (!pchan)
1350 		return;
1351 
1352 	parent = pchan->sk;
1353 
1354 	lock_sock(parent);
1355 
1356 	chan = pchan->ops->new_connection(pchan);
1357 	if (!chan)
1358 		goto clean;
1359 
1360 	sk = chan->sk;
1361 
1362 	hci_conn_hold(conn->hcon);
1363 	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
1364 
1365 	bacpy(&bt_sk(sk)->src, conn->src);
1366 	bacpy(&bt_sk(sk)->dst, conn->dst);
1367 
1368 	l2cap_chan_add(conn, chan);
1369 
1370 	l2cap_chan_ready(chan);
1371 
1372 clean:
1373 	release_sock(parent);
1374 }
1375 
1376 static void l2cap_conn_ready(struct l2cap_conn *conn)
1377 {
1378 	struct l2cap_chan *chan;
1379 	struct hci_conn *hcon = conn->hcon;
1380 
1381 	BT_DBG("conn %p", conn);
1382 
1383 	if (!hcon->out && hcon->type == LE_LINK)
1384 		l2cap_le_conn_ready(conn);
1385 
1386 	if (hcon->out && hcon->type == LE_LINK)
1387 		smp_conn_security(hcon, hcon->pending_sec_level);
1388 
1389 	mutex_lock(&conn->chan_lock);
1390 
1391 	list_for_each_entry(chan, &conn->chan_l, list) {
1392 
1393 		l2cap_chan_lock(chan);
1394 
1395 		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1396 			l2cap_chan_unlock(chan);
1397 			continue;
1398 		}
1399 
1400 		if (hcon->type == LE_LINK) {
1401 			if (smp_conn_security(hcon, chan->sec_level))
1402 				l2cap_chan_ready(chan);
1403 
1404 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1405 			struct sock *sk = chan->sk;
1406 			__clear_chan_timer(chan);
1407 			lock_sock(sk);
1408 			__l2cap_state_change(chan, BT_CONNECTED);
1409 			sk->sk_state_change(sk);
1410 			release_sock(sk);
1411 
1412 		} else if (chan->state == BT_CONNECT)
1413 			l2cap_do_start(chan);
1414 
1415 		l2cap_chan_unlock(chan);
1416 	}
1417 
1418 	mutex_unlock(&conn->chan_lock);
1419 }
1420 
1421 /* Notify sockets that we cannot guarantee reliability anymore */
1422 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1423 {
1424 	struct l2cap_chan *chan;
1425 
1426 	BT_DBG("conn %p", conn);
1427 
1428 	mutex_lock(&conn->chan_lock);
1429 
1430 	list_for_each_entry(chan, &conn->chan_l, list) {
1431 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1432 			l2cap_chan_set_err(chan, err);
1433 	}
1434 
1435 	mutex_unlock(&conn->chan_lock);
1436 }
1437 
1438 static void l2cap_info_timeout(struct work_struct *work)
1439 {
1440 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1441 					       info_timer.work);
1442 
1443 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1444 	conn->info_ident = 0;
1445 
1446 	l2cap_conn_start(conn);
1447 }
1448 
1449 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1450 {
1451 	struct l2cap_conn *conn = hcon->l2cap_data;
1452 	struct l2cap_chan *chan, *l;
1453 
1454 	if (!conn)
1455 		return;
1456 
1457 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1458 
1459 	kfree_skb(conn->rx_skb);
1460 
1461 	mutex_lock(&conn->chan_lock);
1462 
1463 	/* Kill channels */
1464 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1465 		l2cap_chan_hold(chan);
1466 		l2cap_chan_lock(chan);
1467 
1468 		l2cap_chan_del(chan, err);
1469 
1470 		l2cap_chan_unlock(chan);
1471 
1472 		chan->ops->close(chan);
1473 		l2cap_chan_put(chan);
1474 	}
1475 
1476 	mutex_unlock(&conn->chan_lock);
1477 
1478 	hci_chan_del(conn->hchan);
1479 
1480 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1481 		cancel_delayed_work_sync(&conn->info_timer);
1482 
1483 	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1484 		cancel_delayed_work_sync(&conn->security_timer);
1485 		smp_chan_destroy(conn);
1486 	}
1487 
1488 	hcon->l2cap_data = NULL;
1489 	kfree(conn);
1490 }
1491 
1492 static void security_timeout(struct work_struct *work)
1493 {
1494 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1495 					       security_timer.work);
1496 
1497 	BT_DBG("conn %p", conn);
1498 
1499 	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1500 		smp_chan_destroy(conn);
1501 		l2cap_conn_del(conn->hcon, ETIMEDOUT);
1502 	}
1503 }
1504 
1505 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1506 {
1507 	struct l2cap_conn *conn = hcon->l2cap_data;
1508 	struct hci_chan *hchan;
1509 
1510 	if (conn || status)
1511 		return conn;
1512 
1513 	hchan = hci_chan_create(hcon);
1514 	if (!hchan)
1515 		return NULL;
1516 
1517 	conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
1518 	if (!conn) {
1519 		hci_chan_del(hchan);
1520 		return NULL;
1521 	}
1522 
1523 	hcon->l2cap_data = conn;
1524 	conn->hcon = hcon;
1525 	conn->hchan = hchan;
1526 
1527 	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1528 
1529 	switch (hcon->type) {
1530 	case AMP_LINK:
1531 		conn->mtu = hcon->hdev->block_mtu;
1532 		break;
1533 
1534 	case LE_LINK:
1535 		if (hcon->hdev->le_mtu) {
1536 			conn->mtu = hcon->hdev->le_mtu;
1537 			break;
1538 		}
1539 		/* fall through */
1540 
1541 	default:
1542 		conn->mtu = hcon->hdev->acl_mtu;
1543 		break;
1544 	}
1545 
1546 	conn->src = &hcon->hdev->bdaddr;
1547 	conn->dst = &hcon->dst;
1548 
1549 	conn->feat_mask = 0;
1550 
1551 	spin_lock_init(&conn->lock);
1552 	mutex_init(&conn->chan_lock);
1553 
1554 	INIT_LIST_HEAD(&conn->chan_l);
1555 
1556 	if (hcon->type == LE_LINK)
1557 		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1558 	else
1559 		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1560 
1561 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1562 
1563 	return conn;
1564 }
1565 
1566 /* ---- Socket interface ---- */
1567 
1568 /* Find channel with given PSM and source/destination bdaddr.
1569  * Returns closest match.
1570  */
1571 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1572 						   bdaddr_t *src,
1573 						   bdaddr_t *dst)
1574 {
1575 	struct l2cap_chan *c, *c1 = NULL;
1576 
1577 	read_lock(&chan_list_lock);
1578 
1579 	list_for_each_entry(c, &chan_list, global_l) {
1580 		struct sock *sk = c->sk;
1581 
1582 		if (state && c->state != state)
1583 			continue;
1584 
1585 		if (c->psm == psm) {
1586 			int src_match, dst_match;
1587 			int src_any, dst_any;
1588 
1589 			/* Exact match. */
1590 			src_match = !bacmp(&bt_sk(sk)->src, src);
1591 			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1592 			if (src_match && dst_match) {
1593 				read_unlock(&chan_list_lock);
1594 				return c;
1595 			}
1596 
1597 			/* Closest match */
1598 			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1599 			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1600 			if ((src_match && dst_any) || (src_any && dst_match) ||
1601 			    (src_any && dst_any))
1602 				c1 = c;
1603 		}
1604 	}
1605 
1606 	read_unlock(&chan_list_lock);
1607 
1608 	return c1;
1609 }
1610 
1611 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1612 		       bdaddr_t *dst, u8 dst_type)
1613 {
1614 	struct sock *sk = chan->sk;
1615 	bdaddr_t *src = &bt_sk(sk)->src;
1616 	struct l2cap_conn *conn;
1617 	struct hci_conn *hcon;
1618 	struct hci_dev *hdev;
1619 	__u8 auth_type;
1620 	int err;
1621 
1622 	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
1623 	       dst_type, __le16_to_cpu(psm));
1624 
1625 	hdev = hci_get_route(dst, src);
1626 	if (!hdev)
1627 		return -EHOSTUNREACH;
1628 
1629 	hci_dev_lock(hdev);
1630 
1631 	l2cap_chan_lock(chan);
1632 
1633 	/* PSM must be odd and lsb of upper byte must be 0 */
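	/* For example, 0x0001 (SDP) and 0x1001 (first dynamic PSM) pass the
	 * check below, while 0x0002 (even) and 0x0101 (upper byte lsb set)
	 * are rejected.
	 */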
1634 	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1635 	    chan->chan_type != L2CAP_CHAN_RAW) {
1636 		err = -EINVAL;
1637 		goto done;
1638 	}
1639 
1640 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1641 		err = -EINVAL;
1642 		goto done;
1643 	}
1644 
1645 	switch (chan->mode) {
1646 	case L2CAP_MODE_BASIC:
1647 		break;
1648 	case L2CAP_MODE_ERTM:
1649 	case L2CAP_MODE_STREAMING:
1650 		if (!disable_ertm)
1651 			break;
1652 		/* fall through */
1653 	default:
1654 		err = -ENOTSUPP;
1655 		goto done;
1656 	}
1657 
1658 	switch (chan->state) {
1659 	case BT_CONNECT:
1660 	case BT_CONNECT2:
1661 	case BT_CONFIG:
1662 		/* Already connecting */
1663 		err = 0;
1664 		goto done;
1665 
1666 	case BT_CONNECTED:
1667 		/* Already connected */
1668 		err = -EISCONN;
1669 		goto done;
1670 
1671 	case BT_OPEN:
1672 	case BT_BOUND:
1673 		/* Can connect */
1674 		break;
1675 
1676 	default:
1677 		err = -EBADFD;
1678 		goto done;
1679 	}
1680 
1681 	/* Set destination address and psm */
1682 	lock_sock(sk);
1683 	bacpy(&bt_sk(sk)->dst, dst);
1684 	release_sock(sk);
1685 
1686 	chan->psm = psm;
1687 	chan->dcid = cid;
1688 
1689 	auth_type = l2cap_get_auth_type(chan);
1690 
1691 	if (chan->dcid == L2CAP_CID_LE_DATA)
1692 		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1693 				   chan->sec_level, auth_type);
1694 	else
1695 		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1696 				   chan->sec_level, auth_type);
1697 
1698 	if (IS_ERR(hcon)) {
1699 		err = PTR_ERR(hcon);
1700 		goto done;
1701 	}
1702 
1703 	conn = l2cap_conn_add(hcon, 0);
1704 	if (!conn) {
1705 		hci_conn_put(hcon);
1706 		err = -ENOMEM;
1707 		goto done;
1708 	}
1709 
1710 	if (hcon->type == LE_LINK) {
1711 		err = 0;
1712 
1713 		if (!list_empty(&conn->chan_l)) {
1714 			err = -EBUSY;
1715 			hci_conn_put(hcon);
1716 		}
1717 
1718 		if (err)
1719 			goto done;
1720 	}
1721 
1722 	/* Update source addr of the socket */
1723 	bacpy(src, conn->src);
1724 
1725 	l2cap_chan_unlock(chan);
1726 	l2cap_chan_add(conn, chan);
1727 	l2cap_chan_lock(chan);
1728 
1729 	l2cap_state_change(chan, BT_CONNECT);
1730 	__set_chan_timer(chan, sk->sk_sndtimeo);
1731 
1732 	if (hcon->state == BT_CONNECTED) {
1733 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1734 			__clear_chan_timer(chan);
1735 			if (l2cap_chan_check_security(chan))
1736 				l2cap_state_change(chan, BT_CONNECTED);
1737 		} else
1738 			l2cap_do_start(chan);
1739 	}
1740 
1741 	err = 0;
1742 
1743 done:
1744 	l2cap_chan_unlock(chan);
1745 	hci_dev_unlock(hdev);
1746 	hci_dev_put(hdev);
1747 	return err;
1748 }
1749 
1750 int __l2cap_wait_ack(struct sock *sk)
1751 {
1752 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1753 	DECLARE_WAITQUEUE(wait, current);
1754 	int err = 0;
1755 	int timeo = HZ/5;
1756 
1757 	add_wait_queue(sk_sleep(sk), &wait);
1758 	set_current_state(TASK_INTERRUPTIBLE);
1759 	while (chan->unacked_frames > 0 && chan->conn) {
1760 		if (!timeo)
1761 			timeo = HZ/5;
1762 
1763 		if (signal_pending(current)) {
1764 			err = sock_intr_errno(timeo);
1765 			break;
1766 		}
1767 
1768 		release_sock(sk);
1769 		timeo = schedule_timeout(timeo);
1770 		lock_sock(sk);
1771 		set_current_state(TASK_INTERRUPTIBLE);
1772 
1773 		err = sock_error(sk);
1774 		if (err)
1775 			break;
1776 	}
1777 	set_current_state(TASK_RUNNING);
1778 	remove_wait_queue(sk_sleep(sk), &wait);
1779 	return err;
1780 }
1781 
1782 static void l2cap_monitor_timeout(struct work_struct *work)
1783 {
1784 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1785 					       monitor_timer.work);
1786 
1787 	BT_DBG("chan %p", chan);
1788 
1789 	l2cap_chan_lock(chan);
1790 
1791 	if (!chan->conn) {
1792 		l2cap_chan_unlock(chan);
1793 		l2cap_chan_put(chan);
1794 		return;
1795 	}
1796 
1797 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1798 
1799 	l2cap_chan_unlock(chan);
1800 	l2cap_chan_put(chan);
1801 }
1802 
1803 static void l2cap_retrans_timeout(struct work_struct *work)
1804 {
1805 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1806 					       retrans_timer.work);
1807 
1808 	BT_DBG("chan %p", chan);
1809 
1810 	l2cap_chan_lock(chan);
1811 
1812 	if (!chan->conn) {
1813 		l2cap_chan_unlock(chan);
1814 		l2cap_chan_put(chan);
1815 		return;
1816 	}
1817 
1818 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1819 	l2cap_chan_unlock(chan);
1820 	l2cap_chan_put(chan);
1821 }
1822 
1823 static void l2cap_streaming_send(struct l2cap_chan *chan,
1824 				 struct sk_buff_head *skbs)
1825 {
1826 	struct sk_buff *skb;
1827 	struct l2cap_ctrl *control;
1828 
1829 	BT_DBG("chan %p, skbs %p", chan, skbs);
1830 
1831 	if (__chan_is_moving(chan))
1832 		return;
1833 
1834 	skb_queue_splice_tail_init(skbs, &chan->tx_q);
1835 
1836 	while (!skb_queue_empty(&chan->tx_q)) {
1837 
1838 		skb = skb_dequeue(&chan->tx_q);
1839 
1840 		bt_cb(skb)->control.retries = 1;
1841 		control = &bt_cb(skb)->control;
1842 
1843 		control->reqseq = 0;
1844 		control->txseq = chan->next_tx_seq;
1845 
1846 		__pack_control(chan, control, skb);
1847 
1848 		if (chan->fcs == L2CAP_FCS_CRC16) {
1849 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1850 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1851 		}
1852 
1853 		l2cap_do_send(chan, skb);
1854 
1855 		BT_DBG("Sent txseq %u", control->txseq);
1856 
1857 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1858 		chan->frames_sent++;
1859 	}
1860 }
1861 
1862 static int l2cap_ertm_send(struct l2cap_chan *chan)
1863 {
1864 	struct sk_buff *skb, *tx_skb;
1865 	struct l2cap_ctrl *control;
1866 	int sent = 0;
1867 
1868 	BT_DBG("chan %p", chan);
1869 
1870 	if (chan->state != BT_CONNECTED)
1871 		return -ENOTCONN;
1872 
1873 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1874 		return 0;
1875 
1876 	if (__chan_is_moving(chan))
1877 		return 0;
1878 
1879 	while (chan->tx_send_head &&
1880 	       chan->unacked_frames < chan->remote_tx_win &&
1881 	       chan->tx_state == L2CAP_TX_STATE_XMIT) {
1882 
1883 		skb = chan->tx_send_head;
1884 
1885 		bt_cb(skb)->control.retries = 1;
1886 		control = &bt_cb(skb)->control;
1887 
1888 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1889 			control->final = 1;
1890 
1891 		control->reqseq = chan->buffer_seq;
1892 		chan->last_acked_seq = chan->buffer_seq;
1893 		control->txseq = chan->next_tx_seq;
1894 
1895 		__pack_control(chan, control, skb);
1896 
1897 		if (chan->fcs == L2CAP_FCS_CRC16) {
1898 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1899 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1900 		}
1901 
1902 		/* Clone after data has been modified. Data is assumed to be
1903 		 * read-only (for locking purposes) on cloned sk_buffs.
1904 		 */
1905 		tx_skb = skb_clone(skb, GFP_KERNEL);
1906 
1907 		if (!tx_skb)
1908 			break;
1909 
1910 		__set_retrans_timer(chan);
1911 
1912 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1913 		chan->unacked_frames++;
1914 		chan->frames_sent++;
1915 		sent++;
1916 
1917 		if (skb_queue_is_last(&chan->tx_q, skb))
1918 			chan->tx_send_head = NULL;
1919 		else
1920 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1921 
1922 		l2cap_do_send(chan, tx_skb);
1923 		BT_DBG("Sent txseq %u", control->txseq);
1924 	}
1925 
1926 	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1927 	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
1928 
1929 	return sent;
1930 }
1931 
1932 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1933 {
1934 	struct l2cap_ctrl control;
1935 	struct sk_buff *skb;
1936 	struct sk_buff *tx_skb;
1937 	u16 seq;
1938 
1939 	BT_DBG("chan %p", chan);
1940 
1941 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1942 		return;
1943 
1944 	if (__chan_is_moving(chan))
1945 		return;
1946 
1947 	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1948 		seq = l2cap_seq_list_pop(&chan->retrans_list);
1949 
1950 		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1951 		if (!skb) {
1952 			BT_DBG("Error: Can't retransmit seq %d, frame missing",
1953 			       seq);
1954 			continue;
1955 		}
1956 
1957 		bt_cb(skb)->control.retries++;
1958 		control = bt_cb(skb)->control;
1959 
1960 		if (chan->max_tx != 0 &&
1961 		    bt_cb(skb)->control.retries > chan->max_tx) {
1962 			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1963 			l2cap_send_disconn_req(chan, ECONNRESET);
1964 			l2cap_seq_list_clear(&chan->retrans_list);
1965 			break;
1966 		}
1967 
1968 		control.reqseq = chan->buffer_seq;
1969 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1970 			control.final = 1;
1971 		else
1972 			control.final = 0;
1973 
1974 		if (skb_cloned(skb)) {
1975 			/* Cloned sk_buffs are read-only, so we need a
1976 			 * writeable copy
1977 			 */
1978 			tx_skb = skb_copy(skb, GFP_KERNEL);
1979 		} else {
1980 			tx_skb = skb_clone(skb, GFP_KERNEL);
1981 		}
1982 
1983 		if (!tx_skb) {
1984 			l2cap_seq_list_clear(&chan->retrans_list);
1985 			break;
1986 		}
1987 
1988 		/* Update skb contents */
1989 		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1990 			put_unaligned_le32(__pack_extended_control(&control),
1991 					   tx_skb->data + L2CAP_HDR_SIZE);
1992 		} else {
1993 			put_unaligned_le16(__pack_enhanced_control(&control),
1994 					   tx_skb->data + L2CAP_HDR_SIZE);
1995 		}
1996 
1997 		if (chan->fcs == L2CAP_FCS_CRC16) {
1998 			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1999 			put_unaligned_le16(fcs, skb_put(tx_skb,
2000 							L2CAP_FCS_SIZE));
2001 		}
2002 
2003 		l2cap_do_send(chan, tx_skb);
2004 
2005 		BT_DBG("Resent txseq %d", control.txseq);
2006 
2007 		chan->last_acked_seq = chan->buffer_seq;
2008 	}
2009 }
2010 
2011 static void l2cap_retransmit(struct l2cap_chan *chan,
2012 			     struct l2cap_ctrl *control)
2013 {
2014 	BT_DBG("chan %p, control %p", chan, control);
2015 
2016 	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2017 	l2cap_ertm_resend(chan);
2018 }
2019 
2020 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2021 				 struct l2cap_ctrl *control)
2022 {
2023 	struct sk_buff *skb;
2024 
2025 	BT_DBG("chan %p, control %p", chan, control);
2026 
2027 	if (control->poll)
2028 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
2029 
2030 	l2cap_seq_list_clear(&chan->retrans_list);
2031 
2032 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2033 		return;
2034 
2035 	if (chan->unacked_frames) {
2036 		skb_queue_walk(&chan->tx_q, skb) {
2037 			if (bt_cb(skb)->control.txseq == control->reqseq ||
2038 			    skb == chan->tx_send_head)
2039 				break;
2040 		}
2041 
2042 		skb_queue_walk_from(&chan->tx_q, skb) {
2043 			if (skb == chan->tx_send_head)
2044 				break;
2045 
2046 			l2cap_seq_list_append(&chan->retrans_list,
2047 					      bt_cb(skb)->control.txseq);
2048 		}
2049 
2050 		l2cap_ertm_resend(chan);
2051 	}
2052 }
2053 
2054 static void l2cap_send_ack(struct l2cap_chan *chan)
2055 {
2056 	struct l2cap_ctrl control;
2057 	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2058 					 chan->last_acked_seq);
2059 	int threshold;
2060 
2061 	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2062 	       chan, chan->last_acked_seq, chan->buffer_seq);
2063 
2064 	memset(&control, 0, sizeof(control));
2065 	control.sframe = 1;
2066 
2067 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2068 	    chan->rx_state == L2CAP_RX_STATE_RECV) {
2069 		__clear_ack_timer(chan);
2070 		control.super = L2CAP_SUPER_RNR;
2071 		control.reqseq = chan->buffer_seq;
2072 		l2cap_send_sframe(chan, &control);
2073 	} else {
2074 		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2075 			l2cap_ertm_send(chan);
2076 			/* If any i-frames were sent, they included an ack */
2077 			if (chan->buffer_seq == chan->last_acked_seq)
2078 				frames_to_ack = 0;
2079 		}
2080 
2081 		/* Ack now if the window is 3/4ths full.
2082 		 * Calculate without mul or div
2083 		 */
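		/* i.e. (ack_win + (ack_win << 1)) >> 2 == 3 * ack_win / 4 */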
2084 		threshold = chan->ack_win;
2085 		threshold += threshold << 1;
2086 		threshold >>= 2;
2087 
2088 		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2089 		       threshold);
2090 
2091 		if (frames_to_ack >= threshold) {
2092 			__clear_ack_timer(chan);
2093 			control.super = L2CAP_SUPER_RR;
2094 			control.reqseq = chan->buffer_seq;
2095 			l2cap_send_sframe(chan, &control);
2096 			frames_to_ack = 0;
2097 		}
2098 
2099 		if (frames_to_ack)
2100 			__set_ack_timer(chan);
2101 	}
2102 }
2103 
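/* Copy message data from the iovec into skb, chaining continuation
 * fragments (each at most conn->mtu bytes) for anything that does not
 * fit in the first buffer.  Returns the number of bytes copied or a
 * negative error.
 */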
2104 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2105 					 struct msghdr *msg, int len,
2106 					 int count, struct sk_buff *skb)
2107 {
2108 	struct l2cap_conn *conn = chan->conn;
2109 	struct sk_buff **frag;
2110 	int sent = 0;
2111 
2112 	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
2113 		return -EFAULT;
2114 
2115 	sent += count;
2116 	len  -= count;
2117 
2118 	/* Continuation fragments (no L2CAP header) */
2119 	frag = &skb_shinfo(skb)->frag_list;
2120 	while (len) {
2121 		struct sk_buff *tmp;
2122 
2123 		count = min_t(unsigned int, conn->mtu, len);
2124 
2125 		tmp = chan->ops->alloc_skb(chan, count,
2126 					   msg->msg_flags & MSG_DONTWAIT);
2127 		if (IS_ERR(tmp))
2128 			return PTR_ERR(tmp);
2129 
2130 		*frag = tmp;
2131 
2132 		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
2133 			return -EFAULT;
2134 
2135 		(*frag)->priority = skb->priority;
2136 
2137 		sent += count;
2138 		len  -= count;
2139 
2140 		skb->len += (*frag)->len;
2141 		skb->data_len += (*frag)->len;
2142 
2143 		frag = &(*frag)->next;
2144 	}
2145 
2146 	return sent;
2147 }
2148 
2149 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2150 						 struct msghdr *msg, size_t len,
2151 						 u32 priority)
2152 {
2153 	struct l2cap_conn *conn = chan->conn;
2154 	struct sk_buff *skb;
2155 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2156 	struct l2cap_hdr *lh;
2157 
2158 	BT_DBG("chan %p len %zu priority %u", chan, len, priority);
2159 
2160 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2161 
2162 	skb = chan->ops->alloc_skb(chan, count + hlen,
2163 				   msg->msg_flags & MSG_DONTWAIT);
2164 	if (IS_ERR(skb))
2165 		return skb;
2166 
2167 	skb->priority = priority;
2168 
2169 	/* Create L2CAP header */
2170 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2171 	lh->cid = cpu_to_le16(chan->dcid);
2172 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2173 	put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
2174 
2175 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2176 	if (unlikely(err < 0)) {
2177 		kfree_skb(skb);
2178 		return ERR_PTR(err);
2179 	}
2180 	return skb;
2181 }
2182 
2183 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2184 					      struct msghdr *msg, size_t len,
2185 					      u32 priority)
2186 {
2187 	struct l2cap_conn *conn = chan->conn;
2188 	struct sk_buff *skb;
2189 	int err, count;
2190 	struct l2cap_hdr *lh;
2191 
2192 	BT_DBG("chan %p len %zu", chan, len);
2193 
2194 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2195 
2196 	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2197 				   msg->msg_flags & MSG_DONTWAIT);
2198 	if (IS_ERR(skb))
2199 		return skb;
2200 
2201 	skb->priority = priority;
2202 
2203 	/* Create L2CAP header */
2204 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2205 	lh->cid = cpu_to_le16(chan->dcid);
2206 	lh->len = cpu_to_le16(len);
2207 
2208 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2209 	if (unlikely(err < 0)) {
2210 		kfree_skb(skb);
2211 		return ERR_PTR(err);
2212 	}
2213 	return skb;
2214 }
2215 
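/* Allocate and populate a single ERTM/streaming I-frame PDU.  The
 * control field is zeroed here and filled in just before transmission.
 */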
2216 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2217 					       struct msghdr *msg, size_t len,
2218 					       u16 sdulen)
2219 {
2220 	struct l2cap_conn *conn = chan->conn;
2221 	struct sk_buff *skb;
2222 	int err, count, hlen;
2223 	struct l2cap_hdr *lh;
2224 
2225 	BT_DBG("chan %p len %zu", chan, len);
2226 
2227 	if (!conn)
2228 		return ERR_PTR(-ENOTCONN);
2229 
2230 	hlen = __ertm_hdr_size(chan);
2231 
2232 	if (sdulen)
2233 		hlen += L2CAP_SDULEN_SIZE;
2234 
2235 	if (chan->fcs == L2CAP_FCS_CRC16)
2236 		hlen += L2CAP_FCS_SIZE;
2237 
2238 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2239 
2240 	skb = chan->ops->alloc_skb(chan, count + hlen,
2241 				   msg->msg_flags & MSG_DONTWAIT);
2242 	if (IS_ERR(skb))
2243 		return skb;
2244 
2245 	/* Create L2CAP header */
2246 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2247 	lh->cid = cpu_to_le16(chan->dcid);
2248 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2249 
2250 	/* Control header is populated later */
2251 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2252 		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2253 	else
2254 		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2255 
2256 	if (sdulen)
2257 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2258 
2259 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2260 	if (unlikely(err < 0)) {
2261 		kfree_skb(skb);
2262 		return ERR_PTR(err);
2263 	}
2264 
2265 	bt_cb(skb)->control.fcs = chan->fcs;
2266 	bt_cb(skb)->control.retries = 0;
2267 	return skb;
2268 }
2269 
2270 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2271 			     struct sk_buff_head *seg_queue,
2272 			     struct msghdr *msg, size_t len)
2273 {
2274 	struct sk_buff *skb;
2275 	u16 sdu_len;
2276 	size_t pdu_len;
2277 	u8 sar;
2278 
2279 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2280 
2281 	/* It is critical that ERTM PDUs fit in a single HCI fragment,
2282 	 * so fragmented skbs are not used.  The HCI layer's handling
2283 	 * of fragmented skbs is not compatible with ERTM's queueing.
2284 	 */
2285 
2286 	/* PDU size is derived from the HCI MTU */
2287 	pdu_len = chan->conn->mtu;
2288 
2289 	/* Constrain PDU size for BR/EDR connections */
2290 	if (!chan->hs_hcon)
2291 		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2292 
2293 	/* Adjust for largest possible L2CAP overhead. */
2294 	if (chan->fcs)
2295 		pdu_len -= L2CAP_FCS_SIZE;
2296 
2297 	pdu_len -= __ertm_hdr_size(chan);
2298 
2299 	/* Remote device may have requested smaller PDUs */
2300 	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2301 
2302 	if (len <= pdu_len) {
2303 		sar = L2CAP_SAR_UNSEGMENTED;
2304 		sdu_len = 0;
2305 		pdu_len = len;
2306 	} else {
2307 		sar = L2CAP_SAR_START;
2308 		sdu_len = len;
2309 		pdu_len -= L2CAP_SDULEN_SIZE;
2310 	}
2311 
2312 	while (len > 0) {
2313 		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2314 
2315 		if (IS_ERR(skb)) {
2316 			__skb_queue_purge(seg_queue);
2317 			return PTR_ERR(skb);
2318 		}
2319 
2320 		bt_cb(skb)->control.sar = sar;
2321 		__skb_queue_tail(seg_queue, skb);
2322 
2323 		len -= pdu_len;
2324 		if (sdu_len) {
2325 			sdu_len = 0;
2326 			pdu_len += L2CAP_SDULEN_SIZE;
2327 		}
2328 
2329 		if (len <= pdu_len) {
2330 			sar = L2CAP_SAR_END;
2331 			pdu_len = len;
2332 		} else {
2333 			sar = L2CAP_SAR_CONTINUE;
2334 		}
2335 	}
2336 
2337 	return 0;
2338 }
2339 
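/* Entry point for outbound data on a channel.  Connectionless channels
 * send a single PDU; basic mode builds one PDU after an MTU check;
 * ERTM and streaming modes segment the SDU and hand the segments to
 * the TX state machine or the streaming sender.
 */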
2340 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2341 		    u32 priority)
2342 {
2343 	struct sk_buff *skb;
2344 	int err;
2345 	struct sk_buff_head seg_queue;
2346 
2347 	/* Connectionless channel */
2348 	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2349 		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2350 		if (IS_ERR(skb))
2351 			return PTR_ERR(skb);
2352 
2353 		l2cap_do_send(chan, skb);
2354 		return len;
2355 	}
2356 
2357 	switch (chan->mode) {
2358 	case L2CAP_MODE_BASIC:
2359 		/* Check outgoing MTU */
2360 		if (len > chan->omtu)
2361 			return -EMSGSIZE;
2362 
2363 		/* Create a basic PDU */
2364 		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2365 		if (IS_ERR(skb))
2366 			return PTR_ERR(skb);
2367 
2368 		l2cap_do_send(chan, skb);
2369 		err = len;
2370 		break;
2371 
2372 	case L2CAP_MODE_ERTM:
2373 	case L2CAP_MODE_STREAMING:
2374 		/* Check outgoing MTU */
2375 		if (len > chan->omtu) {
2376 			err = -EMSGSIZE;
2377 			break;
2378 		}
2379 
2380 		__skb_queue_head_init(&seg_queue);
2381 
2382 		/* Do segmentation before calling in to the state machine,
2383 		 * since it's possible to block while waiting for memory
2384 		 * allocation.
2385 		 */
2386 		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2387 
2388 		/* The channel could have been closed while segmenting,
2389 		 * check that it is still connected.
2390 		 */
2391 		if (chan->state != BT_CONNECTED) {
2392 			__skb_queue_purge(&seg_queue);
2393 			err = -ENOTCONN;
2394 		}
2395 
2396 		if (err)
2397 			break;
2398 
2399 		if (chan->mode == L2CAP_MODE_ERTM)
2400 			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2401 		else
2402 			l2cap_streaming_send(chan, &seg_queue);
2403 
2404 		err = len;
2405 
2406 		/* If the skbs were not queued for sending, they'll still be in
2407 		 * seg_queue and need to be purged.
2408 		 */
2409 		__skb_queue_purge(&seg_queue);
2410 		break;
2411 
2412 	default:
2413 		BT_DBG("bad state %1.1x", chan->mode);
2414 		err = -EBADFD;
2415 	}
2416 
2417 	return err;
2418 }
2419 
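/* Send an SREJ S-frame for every sequence number between
 * expected_tx_seq and txseq that is not already buffered in srej_q,
 * remembering each request in srej_list.
 */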
2420 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2421 {
2422 	struct l2cap_ctrl control;
2423 	u16 seq;
2424 
2425 	BT_DBG("chan %p, txseq %u", chan, txseq);
2426 
2427 	memset(&control, 0, sizeof(control));
2428 	control.sframe = 1;
2429 	control.super = L2CAP_SUPER_SREJ;
2430 
2431 	for (seq = chan->expected_tx_seq; seq != txseq;
2432 	     seq = __next_seq(chan, seq)) {
2433 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2434 			control.reqseq = seq;
2435 			l2cap_send_sframe(chan, &control);
2436 			l2cap_seq_list_append(&chan->srej_list, seq);
2437 		}
2438 	}
2439 
2440 	chan->expected_tx_seq = __next_seq(chan, txseq);
2441 }
2442 
2443 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2444 {
2445 	struct l2cap_ctrl control;
2446 
2447 	BT_DBG("chan %p", chan);
2448 
2449 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2450 		return;
2451 
2452 	memset(&control, 0, sizeof(control));
2453 	control.sframe = 1;
2454 	control.super = L2CAP_SUPER_SREJ;
2455 	control.reqseq = chan->srej_list.tail;
2456 	l2cap_send_sframe(chan, &control);
2457 }
2458 
2459 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2460 {
2461 	struct l2cap_ctrl control;
2462 	u16 initial_head;
2463 	u16 seq;
2464 
2465 	BT_DBG("chan %p, txseq %u", chan, txseq);
2466 
2467 	memset(&control, 0, sizeof(control));
2468 	control.sframe = 1;
2469 	control.super = L2CAP_SUPER_SREJ;
2470 
2471 	/* Capture initial list head to allow only one pass through the list. */
2472 	initial_head = chan->srej_list.head;
2473 
2474 	do {
2475 		seq = l2cap_seq_list_pop(&chan->srej_list);
2476 		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2477 			break;
2478 
2479 		control.reqseq = seq;
2480 		l2cap_send_sframe(chan, &control);
2481 		l2cap_seq_list_append(&chan->srej_list, seq);
2482 	} while (chan->srej_list.head != initial_head);
2483 }
2484 
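/* Handle an incoming acknowledgment: drop every acked frame from the
 * tx queue up to reqseq and stop the retransmission timer once no
 * unacked frames remain.
 */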
2485 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2486 {
2487 	struct sk_buff *acked_skb;
2488 	u16 ackseq;
2489 
2490 	BT_DBG("chan %p, reqseq %u", chan, reqseq);
2491 
2492 	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2493 		return;
2494 
2495 	BT_DBG("expected_ack_seq %u, unacked_frames %u",
2496 	       chan->expected_ack_seq, chan->unacked_frames);
2497 
2498 	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2499 	     ackseq = __next_seq(chan, ackseq)) {
2500 
2501 		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2502 		if (acked_skb) {
2503 			skb_unlink(acked_skb, &chan->tx_q);
2504 			kfree_skb(acked_skb);
2505 			chan->unacked_frames--;
2506 		}
2507 	}
2508 
2509 	chan->expected_ack_seq = reqseq;
2510 
2511 	if (chan->unacked_frames == 0)
2512 		__clear_retrans_timer(chan);
2513 
2514 	BT_DBG("unacked_frames %u", chan->unacked_frames);
2515 }
2516 
2517 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2518 {
2519 	BT_DBG("chan %p", chan);
2520 
2521 	chan->expected_tx_seq = chan->buffer_seq;
2522 	l2cap_seq_list_clear(&chan->srej_list);
2523 	skb_queue_purge(&chan->srej_q);
2524 	chan->rx_state = L2CAP_RX_STATE_RECV;
2525 }
2526 
2527 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2528 				struct l2cap_ctrl *control,
2529 				struct sk_buff_head *skbs, u8 event)
2530 {
2531 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2532 	       event);
2533 
2534 	switch (event) {
2535 	case L2CAP_EV_DATA_REQUEST:
2536 		if (chan->tx_send_head == NULL)
2537 			chan->tx_send_head = skb_peek(skbs);
2538 
2539 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2540 		l2cap_ertm_send(chan);
2541 		break;
2542 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2543 		BT_DBG("Enter LOCAL_BUSY");
2544 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2545 
2546 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2547 			/* The SREJ_SENT state must be aborted if we are to
2548 			 * enter the LOCAL_BUSY state.
2549 			 */
2550 			l2cap_abort_rx_srej_sent(chan);
2551 		}
2552 
2553 		l2cap_send_ack(chan);
2554 
2555 		break;
2556 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2557 		BT_DBG("Exit LOCAL_BUSY");
2558 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2559 
2560 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2561 			struct l2cap_ctrl local_control;
2562 
2563 			memset(&local_control, 0, sizeof(local_control));
2564 			local_control.sframe = 1;
2565 			local_control.super = L2CAP_SUPER_RR;
2566 			local_control.poll = 1;
2567 			local_control.reqseq = chan->buffer_seq;
2568 			l2cap_send_sframe(chan, &local_control);
2569 
2570 			chan->retry_count = 1;
2571 			__set_monitor_timer(chan);
2572 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2573 		}
2574 		break;
2575 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2576 		l2cap_process_reqseq(chan, control->reqseq);
2577 		break;
2578 	case L2CAP_EV_EXPLICIT_POLL:
2579 		l2cap_send_rr_or_rnr(chan, 1);
2580 		chan->retry_count = 1;
2581 		__set_monitor_timer(chan);
2582 		__clear_ack_timer(chan);
2583 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2584 		break;
2585 	case L2CAP_EV_RETRANS_TO:
2586 		l2cap_send_rr_or_rnr(chan, 1);
2587 		chan->retry_count = 1;
2588 		__set_monitor_timer(chan);
2589 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2590 		break;
2591 	case L2CAP_EV_RECV_FBIT:
2592 		/* Nothing to process */
2593 		break;
2594 	default:
2595 		break;
2596 	}
2597 }
2598 
2599 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2600 				  struct l2cap_ctrl *control,
2601 				  struct sk_buff_head *skbs, u8 event)
2602 {
2603 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2604 	       event);
2605 
2606 	switch (event) {
2607 	case L2CAP_EV_DATA_REQUEST:
2608 		if (chan->tx_send_head == NULL)
2609 			chan->tx_send_head = skb_peek(skbs);
2610 		/* Queue data, but don't send. */
2611 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2612 		break;
2613 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2614 		BT_DBG("Enter LOCAL_BUSY");
2615 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2616 
2617 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2618 			/* The SREJ_SENT state must be aborted if we are to
2619 			 * enter the LOCAL_BUSY state.
2620 			 */
2621 			l2cap_abort_rx_srej_sent(chan);
2622 		}
2623 
2624 		l2cap_send_ack(chan);
2625 
2626 		break;
2627 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2628 		BT_DBG("Exit LOCAL_BUSY");
2629 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2630 
2631 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2632 			struct l2cap_ctrl local_control;
2633 			memset(&local_control, 0, sizeof(local_control));
2634 			local_control.sframe = 1;
2635 			local_control.super = L2CAP_SUPER_RR;
2636 			local_control.poll = 1;
2637 			local_control.reqseq = chan->buffer_seq;
2638 			l2cap_send_sframe(chan, &local_control);
2639 
2640 			chan->retry_count = 1;
2641 			__set_monitor_timer(chan);
2642 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2643 		}
2644 		break;
2645 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2646 		l2cap_process_reqseq(chan, control->reqseq);
2647 
2648 		/* Fall through */
2649 
2650 	case L2CAP_EV_RECV_FBIT:
2651 		if (control && control->final) {
2652 			__clear_monitor_timer(chan);
2653 			if (chan->unacked_frames > 0)
2654 				__set_retrans_timer(chan);
2655 			chan->retry_count = 0;
2656 			chan->tx_state = L2CAP_TX_STATE_XMIT;
2657 			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2658 		}
2659 		break;
2660 	case L2CAP_EV_EXPLICIT_POLL:
2661 		/* Ignore */
2662 		break;
2663 	case L2CAP_EV_MONITOR_TO:
2664 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2665 			l2cap_send_rr_or_rnr(chan, 1);
2666 			__set_monitor_timer(chan);
2667 			chan->retry_count++;
2668 		} else {
2669 			l2cap_send_disconn_req(chan, ECONNABORTED);
2670 		}
2671 		break;
2672 	default:
2673 		break;
2674 	}
2675 }
2676 
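/* Dispatch a TX state machine event to the handler for the current
 * tx_state (XMIT or WAIT_F); other states ignore the event.
 */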
2677 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2678 		     struct sk_buff_head *skbs, u8 event)
2679 {
2680 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2681 	       chan, control, skbs, event, chan->tx_state);
2682 
2683 	switch (chan->tx_state) {
2684 	case L2CAP_TX_STATE_XMIT:
2685 		l2cap_tx_state_xmit(chan, control, skbs, event);
2686 		break;
2687 	case L2CAP_TX_STATE_WAIT_F:
2688 		l2cap_tx_state_wait_f(chan, control, skbs, event);
2689 		break;
2690 	default:
2691 		/* Ignore event */
2692 		break;
2693 	}
2694 }
2695 
2696 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2697 			     struct l2cap_ctrl *control)
2698 {
2699 	BT_DBG("chan %p, control %p", chan, control);
2700 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2701 }
2702 
2703 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2704 				  struct l2cap_ctrl *control)
2705 {
2706 	BT_DBG("chan %p, control %p", chan, control);
2707 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2708 }
2709 
2710 /* Copy frame to all raw sockets on that connection */
2711 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2712 {
2713 	struct sk_buff *nskb;
2714 	struct l2cap_chan *chan;
2715 
2716 	BT_DBG("conn %p", conn);
2717 
2718 	mutex_lock(&conn->chan_lock);
2719 
2720 	list_for_each_entry(chan, &conn->chan_l, list) {
2721 		struct sock *sk = chan->sk;
2722 		if (chan->chan_type != L2CAP_CHAN_RAW)
2723 			continue;
2724 
2725 		/* Don't send frame to the socket it came from */
2726 		if (skb->sk == sk)
2727 			continue;
2728 		nskb = skb_clone(skb, GFP_KERNEL);
2729 		if (!nskb)
2730 			continue;
2731 
2732 		if (chan->ops->recv(chan, nskb))
2733 			kfree_skb(nskb);
2734 	}
2735 
2736 	mutex_unlock(&conn->chan_lock);
2737 }
2738 
2739 /* ---- L2CAP signalling commands ---- */
2740 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2741 				       u8 ident, u16 dlen, void *data)
2742 {
2743 	struct sk_buff *skb, **frag;
2744 	struct l2cap_cmd_hdr *cmd;
2745 	struct l2cap_hdr *lh;
2746 	int len, count;
2747 
2748 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2749 	       conn, code, ident, dlen);
2750 
2751 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2752 	count = min_t(unsigned int, conn->mtu, len);
2753 
2754 	skb = bt_skb_alloc(count, GFP_KERNEL);
2755 	if (!skb)
2756 		return NULL;
2757 
2758 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2759 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2760 
2761 	if (conn->hcon->type == LE_LINK)
2762 		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2763 	else
2764 		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2765 
2766 	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2767 	cmd->code  = code;
2768 	cmd->ident = ident;
2769 	cmd->len   = cpu_to_le16(dlen);
2770 
2771 	if (dlen) {
2772 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2773 		memcpy(skb_put(skb, count), data, count);
2774 		data += count;
2775 	}
2776 
2777 	len -= skb->len;
2778 
2779 	/* Continuation fragments (no L2CAP header) */
2780 	frag = &skb_shinfo(skb)->frag_list;
2781 	while (len) {
2782 		count = min_t(unsigned int, conn->mtu, len);
2783 
2784 		*frag = bt_skb_alloc(count, GFP_KERNEL);
2785 		if (!*frag)
2786 			goto fail;
2787 
2788 		memcpy(skb_put(*frag, count), data, count);
2789 
2790 		len  -= count;
2791 		data += count;
2792 
2793 		frag = &(*frag)->next;
2794 	}
2795 
2796 	return skb;
2797 
2798 fail:
2799 	kfree_skb(skb);
2800 	return NULL;
2801 }
2802 
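/* Extract one configuration option from *ptr, advancing the pointer
 * past it.  Returns the total encoded length of the option.
 */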
2803 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2804 				     unsigned long *val)
2805 {
2806 	struct l2cap_conf_opt *opt = *ptr;
2807 	int len;
2808 
2809 	len = L2CAP_CONF_OPT_SIZE + opt->len;
2810 	*ptr += len;
2811 
2812 	*type = opt->type;
2813 	*olen = opt->len;
2814 
2815 	switch (opt->len) {
2816 	case 1:
2817 		*val = *((u8 *) opt->val);
2818 		break;
2819 
2820 	case 2:
2821 		*val = get_unaligned_le16(opt->val);
2822 		break;
2823 
2824 	case 4:
2825 		*val = get_unaligned_le32(opt->val);
2826 		break;
2827 
2828 	default:
2829 		*val = (unsigned long) opt->val;
2830 		break;
2831 	}
2832 
2833 	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
2834 	return len;
2835 }
2836 
2837 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2838 {
2839 	struct l2cap_conf_opt *opt = *ptr;
2840 
2841 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2842 
2843 	opt->type = type;
2844 	opt->len  = len;
2845 
2846 	switch (len) {
2847 	case 1:
2848 		*((u8 *) opt->val)  = val;
2849 		break;
2850 
2851 	case 2:
2852 		put_unaligned_le16(val, opt->val);
2853 		break;
2854 
2855 	case 4:
2856 		put_unaligned_le32(val, opt->val);
2857 		break;
2858 
2859 	default:
2860 		memcpy(opt->val, (void *) val, len);
2861 		break;
2862 	}
2863 
2864 	*ptr += L2CAP_CONF_OPT_SIZE + len;
2865 }
2866 
2867 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2868 {
2869 	struct l2cap_conf_efs efs;
2870 
2871 	switch (chan->mode) {
2872 	case L2CAP_MODE_ERTM:
2873 		efs.id		= chan->local_id;
2874 		efs.stype	= chan->local_stype;
2875 		efs.msdu	= cpu_to_le16(chan->local_msdu);
2876 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
2877 		efs.acc_lat	= __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2878 		efs.flush_to	= __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2879 		break;
2880 
2881 	case L2CAP_MODE_STREAMING:
2882 		efs.id		= 1;
2883 		efs.stype	= L2CAP_SERV_BESTEFFORT;
2884 		efs.msdu	= cpu_to_le16(chan->local_msdu);
2885 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
2886 		efs.acc_lat	= 0;
2887 		efs.flush_to	= 0;
2888 		break;
2889 
2890 	default:
2891 		return;
2892 	}
2893 
2894 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2895 			   (unsigned long) &efs);
2896 }
2897 
2898 static void l2cap_ack_timeout(struct work_struct *work)
2899 {
2900 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2901 					       ack_timer.work);
2902 	u16 frames_to_ack;
2903 
2904 	BT_DBG("chan %p", chan);
2905 
2906 	l2cap_chan_lock(chan);
2907 
2908 	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2909 				     chan->last_acked_seq);
2910 
2911 	if (frames_to_ack)
2912 		l2cap_send_rr_or_rnr(chan, 0);
2913 
2914 	l2cap_chan_unlock(chan);
2915 	l2cap_chan_put(chan);
2916 }
2917 
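/* Reset ERTM/streaming state for a freshly configured channel and, for
 * ERTM, initialise the timers and the SREJ/retransmit sequence lists.
 */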
2918 int l2cap_ertm_init(struct l2cap_chan *chan)
2919 {
2920 	int err;
2921 
2922 	chan->next_tx_seq = 0;
2923 	chan->expected_tx_seq = 0;
2924 	chan->expected_ack_seq = 0;
2925 	chan->unacked_frames = 0;
2926 	chan->buffer_seq = 0;
2927 	chan->frames_sent = 0;
2928 	chan->last_acked_seq = 0;
2929 	chan->sdu = NULL;
2930 	chan->sdu_last_frag = NULL;
2931 	chan->sdu_len = 0;
2932 
2933 	skb_queue_head_init(&chan->tx_q);
2934 
2935 	chan->local_amp_id = 0;
2936 	chan->move_id = 0;
2937 	chan->move_state = L2CAP_MOVE_STABLE;
2938 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
2939 
2940 	if (chan->mode != L2CAP_MODE_ERTM)
2941 		return 0;
2942 
2943 	chan->rx_state = L2CAP_RX_STATE_RECV;
2944 	chan->tx_state = L2CAP_TX_STATE_XMIT;
2945 
2946 	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2947 	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2948 	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2949 
2950 	skb_queue_head_init(&chan->srej_q);
2951 
2952 	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2953 	if (err < 0)
2954 		return err;
2955 
2956 	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2957 	if (err < 0)
2958 		l2cap_seq_list_free(&chan->srej_list);
2959 
2960 	return err;
2961 }
2962 
2963 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2964 {
2965 	switch (mode) {
2966 	case L2CAP_MODE_STREAMING:
2967 	case L2CAP_MODE_ERTM:
2968 		if (l2cap_mode_supported(mode, remote_feat_mask))
2969 			return mode;
2970 		/* fall through */
2971 	default:
2972 		return L2CAP_MODE_BASIC;
2973 	}
2974 }
2975 
2976 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2977 {
2978 	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2979 }
2980 
2981 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2982 {
2983 	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2984 }
2985 
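/* Pick the ERTM retransmission and monitor timeouts: derive them from
 * the AMP controller's best-effort flush timeout when a high-speed
 * link is in use, otherwise use the default values.
 */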
2986 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
2987 				      struct l2cap_conf_rfc *rfc)
2988 {
2989 	if (chan->local_amp_id && chan->hs_hcon) {
2990 		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
2991 
2992 		/* Class 1 devices must have ERTM timeouts
2993 		 * exceeding the Link Supervision Timeout.  The
2994 		 * default Link Supervision Timeout for AMP
2995 		 * controllers is 10 seconds.
2996 		 *
2997 		 * Class 1 devices use 0xffffffff for their
2998 		 * best-effort flush timeout, so the clamping logic
2999 		 * will result in a timeout that meets the above
3000 		 * requirement.  ERTM timeouts are 16-bit values, so
3001 		 * the maximum timeout is 65.535 seconds.
3002 		 */
3003 
3004 		/* Convert timeout to milliseconds and round */
3005 		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3006 
3007 		/* This is the recommended formula for class 2 devices
3008 		 * that start ERTM timers when packets are sent to the
3009 		 * controller.
3010 		 */
3011 		ertm_to = 3 * ertm_to + 500;
3012 
3013 		if (ertm_to > 0xffff)
3014 			ertm_to = 0xffff;
3015 
3016 		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3017 		rfc->monitor_timeout = rfc->retrans_timeout;
3018 	} else {
3019 		rfc->retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3020 		rfc->monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3021 	}
3022 }
3023 
3024 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3025 {
3026 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3027 	    __l2cap_ews_supported(chan)) {
3028 		/* use extended control field */
3029 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3030 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3031 	} else {
3032 		chan->tx_win = min_t(u16, chan->tx_win,
3033 				     L2CAP_DEFAULT_TX_WINDOW);
3034 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3035 	}
3036 	chan->ack_win = chan->tx_win;
3037 }
3038 
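/* Build an outgoing Configuration Request for the channel, selecting
 * the mode and adding MTU, RFC, FCS, EFS and extended window options
 * as appropriate.  Returns the encoded length.
 */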
3039 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
3040 {
3041 	struct l2cap_conf_req *req = data;
3042 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3043 	void *ptr = req->data;
3044 	u16 size;
3045 
3046 	BT_DBG("chan %p", chan);
3047 
3048 	if (chan->num_conf_req || chan->num_conf_rsp)
3049 		goto done;
3050 
3051 	switch (chan->mode) {
3052 	case L2CAP_MODE_STREAMING:
3053 	case L2CAP_MODE_ERTM:
3054 		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3055 			break;
3056 
3057 		if (__l2cap_efs_supported(chan))
3058 			set_bit(FLAG_EFS_ENABLE, &chan->flags);
3059 
3060 		/* fall through */
3061 	default:
3062 		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3063 		break;
3064 	}
3065 
3066 done:
3067 	if (chan->imtu != L2CAP_DEFAULT_MTU)
3068 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3069 
3070 	switch (chan->mode) {
3071 	case L2CAP_MODE_BASIC:
3072 		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3073 		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3074 			break;
3075 
3076 		rfc.mode            = L2CAP_MODE_BASIC;
3077 		rfc.txwin_size      = 0;
3078 		rfc.max_transmit    = 0;
3079 		rfc.retrans_timeout = 0;
3080 		rfc.monitor_timeout = 0;
3081 		rfc.max_pdu_size    = 0;
3082 
3083 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3084 				   (unsigned long) &rfc);
3085 		break;
3086 
3087 	case L2CAP_MODE_ERTM:
3088 		rfc.mode            = L2CAP_MODE_ERTM;
3089 		rfc.max_transmit    = chan->max_tx;
3090 
3091 		__l2cap_set_ertm_timeouts(chan, &rfc);
3092 
3093 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3094 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3095 			     L2CAP_FCS_SIZE);
3096 		rfc.max_pdu_size = cpu_to_le16(size);
3097 
3098 		l2cap_txwin_setup(chan);
3099 
3100 		rfc.txwin_size = min_t(u16, chan->tx_win,
3101 				       L2CAP_DEFAULT_TX_WINDOW);
3102 
3103 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3104 				   (unsigned long) &rfc);
3105 
3106 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3107 			l2cap_add_opt_efs(&ptr, chan);
3108 
3109 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3110 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3111 					   chan->tx_win);
3112 
3113 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3114 			if (chan->fcs == L2CAP_FCS_NONE ||
3115 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3116 				chan->fcs = L2CAP_FCS_NONE;
3117 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3118 						   chan->fcs);
3119 			}
3120 		break;
3121 
3122 	case L2CAP_MODE_STREAMING:
3123 		l2cap_txwin_setup(chan);
3124 		rfc.mode            = L2CAP_MODE_STREAMING;
3125 		rfc.txwin_size      = 0;
3126 		rfc.max_transmit    = 0;
3127 		rfc.retrans_timeout = 0;
3128 		rfc.monitor_timeout = 0;
3129 
3130 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3131 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3132 			     L2CAP_FCS_SIZE);
3133 		rfc.max_pdu_size = cpu_to_le16(size);
3134 
3135 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3136 				   (unsigned long) &rfc);
3137 
3138 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3139 			l2cap_add_opt_efs(&ptr, chan);
3140 
3141 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3142 			if (chan->fcs == L2CAP_FCS_NONE ||
3143 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3144 				chan->fcs = L2CAP_FCS_NONE;
3145 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3146 						   chan->fcs);
3147 			}
3148 		break;
3149 	}
3150 
3151 	req->dcid  = cpu_to_le16(chan->dcid);
3152 	req->flags = __constant_cpu_to_le16(0);
3153 
3154 	return ptr - data;
3155 }
3156 
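/* Parse the peer's accumulated Configuration Request options and build
 * the Configuration Response in 'data'.  Returns the length of the
 * response or a negative error if the request must be refused.
 */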
3157 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
3158 {
3159 	struct l2cap_conf_rsp *rsp = data;
3160 	void *ptr = rsp->data;
3161 	void *req = chan->conf_req;
3162 	int len = chan->conf_len;
3163 	int type, hint, olen;
3164 	unsigned long val;
3165 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3166 	struct l2cap_conf_efs efs;
3167 	u8 remote_efs = 0;
3168 	u16 mtu = L2CAP_DEFAULT_MTU;
3169 	u16 result = L2CAP_CONF_SUCCESS;
3170 	u16 size;
3171 
3172 	BT_DBG("chan %p", chan);
3173 
3174 	while (len >= L2CAP_CONF_OPT_SIZE) {
3175 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3176 
3177 		hint  = type & L2CAP_CONF_HINT;
3178 		type &= L2CAP_CONF_MASK;
3179 
3180 		switch (type) {
3181 		case L2CAP_CONF_MTU:
3182 			mtu = val;
3183 			break;
3184 
3185 		case L2CAP_CONF_FLUSH_TO:
3186 			chan->flush_to = val;
3187 			break;
3188 
3189 		case L2CAP_CONF_QOS:
3190 			break;
3191 
3192 		case L2CAP_CONF_RFC:
3193 			if (olen == sizeof(rfc))
3194 				memcpy(&rfc, (void *) val, olen);
3195 			break;
3196 
3197 		case L2CAP_CONF_FCS:
3198 			if (val == L2CAP_FCS_NONE)
3199 				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3200 			break;
3201 
3202 		case L2CAP_CONF_EFS:
3203 			remote_efs = 1;
3204 			if (olen == sizeof(efs))
3205 				memcpy(&efs, (void *) val, olen);
3206 			break;
3207 
3208 		case L2CAP_CONF_EWS:
3209 			if (!enable_hs)
3210 				return -ECONNREFUSED;
3211 
3212 			set_bit(FLAG_EXT_CTRL, &chan->flags);
3213 			set_bit(CONF_EWS_RECV, &chan->conf_state);
3214 			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3215 			chan->remote_tx_win = val;
3216 			break;
3217 
3218 		default:
3219 			if (hint)
3220 				break;
3221 
3222 			result = L2CAP_CONF_UNKNOWN;
3223 			*((u8 *) ptr++) = type;
3224 			break;
3225 		}
3226 	}
3227 
3228 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
3229 		goto done;
3230 
3231 	switch (chan->mode) {
3232 	case L2CAP_MODE_STREAMING:
3233 	case L2CAP_MODE_ERTM:
3234 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3235 			chan->mode = l2cap_select_mode(rfc.mode,
3236 						       chan->conn->feat_mask);
3237 			break;
3238 		}
3239 
3240 		if (remote_efs) {
3241 			if (__l2cap_efs_supported(chan))
3242 				set_bit(FLAG_EFS_ENABLE, &chan->flags);
3243 			else
3244 				return -ECONNREFUSED;
3245 		}
3246 
3247 		if (chan->mode != rfc.mode)
3248 			return -ECONNREFUSED;
3249 
3250 		break;
3251 	}
3252 
3253 done:
3254 	if (chan->mode != rfc.mode) {
3255 		result = L2CAP_CONF_UNACCEPT;
3256 		rfc.mode = chan->mode;
3257 
3258 		if (chan->num_conf_rsp == 1)
3259 			return -ECONNREFUSED;
3260 
3261 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3262 				   (unsigned long) &rfc);
3263 	}
3264 
3265 	if (result == L2CAP_CONF_SUCCESS) {
3266 		/* Configure output options and let the other side know
3267 		 * which ones we don't like. */
3268 
3269 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
3270 			result = L2CAP_CONF_UNACCEPT;
3271 		else {
3272 			chan->omtu = mtu;
3273 			set_bit(CONF_MTU_DONE, &chan->conf_state);
3274 		}
3275 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3276 
3277 		if (remote_efs) {
3278 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3279 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3280 			    efs.stype != chan->local_stype) {
3281 
3282 				result = L2CAP_CONF_UNACCEPT;
3283 
3284 				if (chan->num_conf_req >= 1)
3285 					return -ECONNREFUSED;
3286 
3287 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3288 						   sizeof(efs),
3289 						   (unsigned long) &efs);
3290 			} else {
3291 				/* Send PENDING Conf Rsp */
3292 				result = L2CAP_CONF_PENDING;
3293 				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3294 			}
3295 		}
3296 
3297 		switch (rfc.mode) {
3298 		case L2CAP_MODE_BASIC:
3299 			chan->fcs = L2CAP_FCS_NONE;
3300 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3301 			break;
3302 
3303 		case L2CAP_MODE_ERTM:
3304 			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3305 				chan->remote_tx_win = rfc.txwin_size;
3306 			else
3307 				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3308 
3309 			chan->remote_max_tx = rfc.max_transmit;
3310 
3311 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3312 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3313 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3314 			rfc.max_pdu_size = cpu_to_le16(size);
3315 			chan->remote_mps = size;
3316 
3317 			__l2cap_set_ertm_timeouts(chan, &rfc);
3318 
3319 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3320 
3321 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3322 					   sizeof(rfc), (unsigned long) &rfc);
3323 
3324 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3325 				chan->remote_id = efs.id;
3326 				chan->remote_stype = efs.stype;
3327 				chan->remote_msdu = le16_to_cpu(efs.msdu);
3328 				chan->remote_flush_to =
3329 					le32_to_cpu(efs.flush_to);
3330 				chan->remote_acc_lat =
3331 					le32_to_cpu(efs.acc_lat);
3332 				chan->remote_sdu_itime =
3333 					le32_to_cpu(efs.sdu_itime);
3334 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3335 						   sizeof(efs),
3336 						   (unsigned long) &efs);
3337 			}
3338 			break;
3339 
3340 		case L2CAP_MODE_STREAMING:
3341 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3342 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3343 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3344 			rfc.max_pdu_size = cpu_to_le16(size);
3345 			chan->remote_mps = size;
3346 
3347 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3348 
3349 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3350 					   (unsigned long) &rfc);
3351 
3352 			break;
3353 
3354 		default:
3355 			result = L2CAP_CONF_UNACCEPT;
3356 
3357 			memset(&rfc, 0, sizeof(rfc));
3358 			rfc.mode = chan->mode;
3359 		}
3360 
3361 		if (result == L2CAP_CONF_SUCCESS)
3362 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3363 	}
3364 	rsp->scid   = cpu_to_le16(chan->dcid);
3365 	rsp->result = cpu_to_le16(result);
3366 	rsp->flags  = __constant_cpu_to_le16(0);
3367 
3368 	return ptr - data;
3369 }
3370 
3371 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3372 				void *data, u16 *result)
3373 {
3374 	struct l2cap_conf_req *req = data;
3375 	void *ptr = req->data;
3376 	int type, olen;
3377 	unsigned long val;
3378 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3379 	struct l2cap_conf_efs efs;
3380 
3381 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3382 
3383 	while (len >= L2CAP_CONF_OPT_SIZE) {
3384 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3385 
3386 		switch (type) {
3387 		case L2CAP_CONF_MTU:
3388 			if (val < L2CAP_DEFAULT_MIN_MTU) {
3389 				*result = L2CAP_CONF_UNACCEPT;
3390 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3391 			} else
3392 				chan->imtu = val;
3393 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3394 			break;
3395 
3396 		case L2CAP_CONF_FLUSH_TO:
3397 			chan->flush_to = val;
3398 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3399 					   2, chan->flush_to);
3400 			break;
3401 
3402 		case L2CAP_CONF_RFC:
3403 			if (olen == sizeof(rfc))
3404 				memcpy(&rfc, (void *)val, olen);
3405 
3406 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3407 			    rfc.mode != chan->mode)
3408 				return -ECONNREFUSED;
3409 
3410 			chan->fcs = 0;
3411 
3412 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3413 					   sizeof(rfc), (unsigned long) &rfc);
3414 			break;
3415 
3416 		case L2CAP_CONF_EWS:
3417 			chan->ack_win = min_t(u16, val, chan->ack_win);
3418 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3419 					   chan->tx_win);
3420 			break;
3421 
3422 		case L2CAP_CONF_EFS:
3423 			if (olen == sizeof(efs))
3424 				memcpy(&efs, (void *)val, olen);
3425 
3426 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3427 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3428 			    efs.stype != chan->local_stype)
3429 				return -ECONNREFUSED;
3430 
3431 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3432 					   (unsigned long) &efs);
3433 			break;
3434 
3435 		case L2CAP_CONF_FCS:
3436 			if (*result == L2CAP_CONF_PENDING)
3437 				if (val == L2CAP_FCS_NONE)
3438 					set_bit(CONF_RECV_NO_FCS,
3439 						&chan->conf_state);
3440 			break;
3441 		}
3442 	}
3443 
3444 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3445 		return -ECONNREFUSED;
3446 
3447 	chan->mode = rfc.mode;
3448 
3449 	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3450 		switch (rfc.mode) {
3451 		case L2CAP_MODE_ERTM:
3452 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3453 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3454 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3455 			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3456 				chan->ack_win = min_t(u16, chan->ack_win,
3457 						      rfc.txwin_size);
3458 
3459 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3460 				chan->local_msdu = le16_to_cpu(efs.msdu);
3461 				chan->local_sdu_itime =
3462 					le32_to_cpu(efs.sdu_itime);
3463 				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3464 				chan->local_flush_to =
3465 					le32_to_cpu(efs.flush_to);
3466 			}
3467 			break;
3468 
3469 		case L2CAP_MODE_STREAMING:
3470 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3471 		}
3472 	}
3473 
3474 	req->dcid   = cpu_to_le16(chan->dcid);
3475 	req->flags  = __constant_cpu_to_le16(0);
3476 
3477 	return ptr - data;
3478 }
3479 
3480 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3481 				u16 result, u16 flags)
3482 {
3483 	struct l2cap_conf_rsp *rsp = data;
3484 	void *ptr = rsp->data;
3485 
3486 	BT_DBG("chan %p", chan);
3487 
3488 	rsp->scid   = cpu_to_le16(chan->dcid);
3489 	rsp->result = cpu_to_le16(result);
3490 	rsp->flags  = cpu_to_le16(flags);
3491 
3492 	return ptr - data;
3493 }
3494 
3495 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3496 {
3497 	struct l2cap_conn_rsp rsp;
3498 	struct l2cap_conn *conn = chan->conn;
3499 	u8 buf[128];
3500 	u8 rsp_code;
3501 
3502 	rsp.scid   = cpu_to_le16(chan->dcid);
3503 	rsp.dcid   = cpu_to_le16(chan->scid);
3504 	rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3505 	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3506 
3507 	if (chan->hs_hcon)
3508 		rsp_code = L2CAP_CREATE_CHAN_RSP;
3509 	else
3510 		rsp_code = L2CAP_CONN_RSP;
3511 
3512 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3513 
3514 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3515 
3516 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3517 		return;
3518 
3519 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3520 		       l2cap_build_conf_req(chan, buf), buf);
3521 	chan->num_conf_req++;
3522 }
3523 
3524 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3525 {
3526 	int type, olen;
3527 	unsigned long val;
3528 	/* Use sane default values in case a misbehaving remote device
3529 	 * did not send an RFC or extended window size option.
3530 	 */
3531 	u16 txwin_ext = chan->ack_win;
3532 	struct l2cap_conf_rfc rfc = {
3533 		.mode = chan->mode,
3534 		.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3535 		.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3536 		.max_pdu_size = cpu_to_le16(chan->imtu),
3537 		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3538 	};
3539 
3540 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3541 
3542 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3543 		return;
3544 
3545 	while (len >= L2CAP_CONF_OPT_SIZE) {
3546 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3547 
3548 		switch (type) {
3549 		case L2CAP_CONF_RFC:
3550 			if (olen == sizeof(rfc))
3551 				memcpy(&rfc, (void *)val, olen);
3552 			break;
3553 		case L2CAP_CONF_EWS:
3554 			txwin_ext = val;
3555 			break;
3556 		}
3557 	}
3558 
3559 	switch (rfc.mode) {
3560 	case L2CAP_MODE_ERTM:
3561 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3562 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3563 		chan->mps = le16_to_cpu(rfc.max_pdu_size);
3564 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3565 			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3566 		else
3567 			chan->ack_win = min_t(u16, chan->ack_win,
3568 					      rfc.txwin_size);
3569 		break;
3570 	case L2CAP_MODE_STREAMING:
3571 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3572 	}
3573 }
3574 
3575 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3576 				    struct l2cap_cmd_hdr *cmd, u8 *data)
3577 {
3578 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3579 
3580 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3581 		return 0;
3582 
3583 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3584 	    cmd->ident == conn->info_ident) {
3585 		cancel_delayed_work(&conn->info_timer);
3586 
3587 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3588 		conn->info_ident = 0;
3589 
3590 		l2cap_conn_start(conn);
3591 	}
3592 
3593 	return 0;
3594 }
3595 
3596 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3597 					struct l2cap_cmd_hdr *cmd,
3598 					u8 *data, u8 rsp_code, u8 amp_id)
3599 {
3600 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3601 	struct l2cap_conn_rsp rsp;
3602 	struct l2cap_chan *chan = NULL, *pchan;
3603 	struct sock *parent, *sk = NULL;
3604 	int result, status = L2CAP_CS_NO_INFO;
3605 
3606 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3607 	__le16 psm = req->psm;
3608 
3609 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3610 
3611 	/* Check if we have socket listening on psm */
3612 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3613 	if (!pchan) {
3614 		result = L2CAP_CR_BAD_PSM;
3615 		goto sendresp;
3616 	}
3617 
3618 	parent = pchan->sk;
3619 
3620 	mutex_lock(&conn->chan_lock);
3621 	lock_sock(parent);
3622 
3623 	/* Check if the ACL is secure enough (if not SDP) */
3624 	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3625 	    !hci_conn_check_link_mode(conn->hcon)) {
3626 		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3627 		result = L2CAP_CR_SEC_BLOCK;
3628 		goto response;
3629 	}
3630 
3631 	result = L2CAP_CR_NO_MEM;
3632 
3633 	/* Check if we already have channel with that dcid */
3634 	if (__l2cap_get_chan_by_dcid(conn, scid))
3635 		goto response;
3636 
3637 	chan = pchan->ops->new_connection(pchan);
3638 	if (!chan)
3639 		goto response;
3640 
3641 	sk = chan->sk;
3642 
3643 	hci_conn_hold(conn->hcon);
3644 
3645 	bacpy(&bt_sk(sk)->src, conn->src);
3646 	bacpy(&bt_sk(sk)->dst, conn->dst);
3647 	chan->psm  = psm;
3648 	chan->dcid = scid;
3649 	chan->local_amp_id = amp_id;
3650 
3651 	__l2cap_chan_add(conn, chan);
3652 
3653 	dcid = chan->scid;
3654 
3655 	__set_chan_timer(chan, sk->sk_sndtimeo);
3656 
3657 	chan->ident = cmd->ident;
3658 
3659 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3660 		if (l2cap_chan_check_security(chan)) {
3661 			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3662 				__l2cap_state_change(chan, BT_CONNECT2);
3663 				result = L2CAP_CR_PEND;
3664 				status = L2CAP_CS_AUTHOR_PEND;
3665 				chan->ops->defer(chan);
3666 			} else {
3667 				/* Force pending result for AMP controllers.
3668 				 * The connection will succeed after the
3669 				 * physical link is up.
3670 				 */
3671 				if (amp_id) {
3672 					__l2cap_state_change(chan, BT_CONNECT2);
3673 					result = L2CAP_CR_PEND;
3674 				} else {
3675 					__l2cap_state_change(chan, BT_CONFIG);
3676 					result = L2CAP_CR_SUCCESS;
3677 				}
3678 				status = L2CAP_CS_NO_INFO;
3679 			}
3680 		} else {
3681 			__l2cap_state_change(chan, BT_CONNECT2);
3682 			result = L2CAP_CR_PEND;
3683 			status = L2CAP_CS_AUTHEN_PEND;
3684 		}
3685 	} else {
3686 		__l2cap_state_change(chan, BT_CONNECT2);
3687 		result = L2CAP_CR_PEND;
3688 		status = L2CAP_CS_NO_INFO;
3689 	}
3690 
3691 response:
3692 	release_sock(parent);
3693 	mutex_unlock(&conn->chan_lock);
3694 
3695 sendresp:
3696 	rsp.scid   = cpu_to_le16(scid);
3697 	rsp.dcid   = cpu_to_le16(dcid);
3698 	rsp.result = cpu_to_le16(result);
3699 	rsp.status = cpu_to_le16(status);
3700 	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3701 
3702 	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3703 		struct l2cap_info_req info;
3704 		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3705 
3706 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3707 		conn->info_ident = l2cap_get_ident(conn);
3708 
3709 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3710 
3711 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3712 			       sizeof(info), &info);
3713 	}
3714 
3715 	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3716 	    result == L2CAP_CR_SUCCESS) {
3717 		u8 buf[128];
3718 		set_bit(CONF_REQ_SENT, &chan->conf_state);
3719 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3720 			       l2cap_build_conf_req(chan, buf), buf);
3721 		chan->num_conf_req++;
3722 	}
3723 
3724 	return chan;
3725 }
3726 
3727 static int l2cap_connect_req(struct l2cap_conn *conn,
3728 			     struct l2cap_cmd_hdr *cmd, u8 *data)
3729 {
3730 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3731 	return 0;
3732 }
3733 
3734 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3735 				    struct l2cap_cmd_hdr *cmd, u8 *data)
3736 {
3737 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3738 	u16 scid, dcid, result, status;
3739 	struct l2cap_chan *chan;
3740 	u8 req[128];
3741 	int err;
3742 
3743 	scid   = __le16_to_cpu(rsp->scid);
3744 	dcid   = __le16_to_cpu(rsp->dcid);
3745 	result = __le16_to_cpu(rsp->result);
3746 	status = __le16_to_cpu(rsp->status);
3747 
3748 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3749 	       dcid, scid, result, status);
3750 
3751 	mutex_lock(&conn->chan_lock);
3752 
3753 	if (scid) {
3754 		chan = __l2cap_get_chan_by_scid(conn, scid);
3755 		if (!chan) {
3756 			err = -EFAULT;
3757 			goto unlock;
3758 		}
3759 	} else {
3760 		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3761 		if (!chan) {
3762 			err = -EFAULT;
3763 			goto unlock;
3764 		}
3765 	}
3766 
3767 	err = 0;
3768 
3769 	l2cap_chan_lock(chan);
3770 
3771 	switch (result) {
3772 	case L2CAP_CR_SUCCESS:
3773 		l2cap_state_change(chan, BT_CONFIG);
3774 		chan->ident = 0;
3775 		chan->dcid = dcid;
3776 		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3777 
3778 		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3779 			break;
3780 
3781 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3782 			       l2cap_build_conf_req(chan, req), req);
3783 		chan->num_conf_req++;
3784 		break;
3785 
3786 	case L2CAP_CR_PEND:
3787 		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3788 		break;
3789 
3790 	default:
3791 		l2cap_chan_del(chan, ECONNREFUSED);
3792 		break;
3793 	}
3794 
3795 	l2cap_chan_unlock(chan);
3796 
3797 unlock:
3798 	mutex_unlock(&conn->chan_lock);
3799 
3800 	return err;
3801 }
3802 
3803 static inline void set_default_fcs(struct l2cap_chan *chan)
3804 {
3805 	/* FCS is enabled only in ERTM or streaming mode, if one or both
3806 	 * sides request it.
3807 	 */
3808 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3809 		chan->fcs = L2CAP_FCS_NONE;
3810 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3811 		chan->fcs = L2CAP_FCS_CRC16;
3812 }
3813 
3814 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3815 				    u8 ident, u16 flags)
3816 {
3817 	struct l2cap_conn *conn = chan->conn;
3818 
3819 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3820 	       flags);
3821 
3822 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3823 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3824 
3825 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3826 		       l2cap_build_conf_rsp(chan, data,
3827 					    L2CAP_CONF_SUCCESS, flags), data);
3828 }
3829 
3830 static inline int l2cap_config_req(struct l2cap_conn *conn,
3831 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3832 				   u8 *data)
3833 {
3834 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3835 	u16 dcid, flags;
3836 	u8 rsp[64];
3837 	struct l2cap_chan *chan;
3838 	int len, err = 0;
3839 
3840 	dcid  = __le16_to_cpu(req->dcid);
3841 	flags = __le16_to_cpu(req->flags);
3842 
3843 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3844 
3845 	chan = l2cap_get_chan_by_scid(conn, dcid);
3846 	if (!chan)
3847 		return -ENOENT;
3848 
3849 	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3850 		struct l2cap_cmd_rej_cid rej;
3851 
3852 		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3853 		rej.scid = cpu_to_le16(chan->scid);
3854 		rej.dcid = cpu_to_le16(chan->dcid);
3855 
3856 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3857 			       sizeof(rej), &rej);
3858 		goto unlock;
3859 	}
3860 
3861 	/* Reject if config buffer is too small. */
3862 	len = cmd_len - sizeof(*req);
3863 	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3864 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3865 			       l2cap_build_conf_rsp(chan, rsp,
3866 			       L2CAP_CONF_REJECT, flags), rsp);
3867 		goto unlock;
3868 	}
3869 
3870 	/* Store config. */
3871 	memcpy(chan->conf_req + chan->conf_len, req->data, len);
3872 	chan->conf_len += len;
3873 
3874 	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
3875 		/* Incomplete config. Send empty response. */
3876 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3877 			       l2cap_build_conf_rsp(chan, rsp,
3878 			       L2CAP_CONF_SUCCESS, flags), rsp);
3879 		goto unlock;
3880 	}
3881 
3882 	/* Complete config. */
3883 	len = l2cap_parse_conf_req(chan, rsp);
3884 	if (len < 0) {
3885 		l2cap_send_disconn_req(chan, ECONNRESET);
3886 		goto unlock;
3887 	}
3888 
3889 	chan->ident = cmd->ident;
3890 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3891 	chan->num_conf_rsp++;
3892 
3893 	/* Reset config buffer. */
3894 	chan->conf_len = 0;
3895 
3896 	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
3897 		goto unlock;
3898 
3899 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3900 		set_default_fcs(chan);
3901 
3902 		if (chan->mode == L2CAP_MODE_ERTM ||
3903 		    chan->mode == L2CAP_MODE_STREAMING)
3904 			err = l2cap_ertm_init(chan);
3905 
3906 		if (err < 0)
3907 			l2cap_send_disconn_req(chan, -err);
3908 		else
3909 			l2cap_chan_ready(chan);
3910 
3911 		goto unlock;
3912 	}
3913 
3914 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3915 		u8 buf[64];
3916 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3917 			       l2cap_build_conf_req(chan, buf), buf);
3918 		chan->num_conf_req++;
3919 	}
3920 
3921 	/* Got Conf Rsp PENDING from remote side and assume we sent
3922 	 * Conf Rsp PENDING in the code above */
3923 	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3924 	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3925 
3926 		/* check compatibility */
3927 
3928 		/* Send rsp for BR/EDR channel */
3929 		if (!chan->hs_hcon)
3930 			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
3931 		else
3932 			chan->ident = cmd->ident;
3933 	}
3934 
3935 unlock:
3936 	l2cap_chan_unlock(chan);
3937 	return err;
3938 }
3939 
3940 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
3941 				   struct l2cap_cmd_hdr *cmd, u8 *data)
3942 {
3943 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3944 	u16 scid, flags, result;
3945 	struct l2cap_chan *chan;
3946 	int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3947 	int err = 0;
3948 
3949 	scid   = __le16_to_cpu(rsp->scid);
3950 	flags  = __le16_to_cpu(rsp->flags);
3951 	result = __le16_to_cpu(rsp->result);
3952 
3953 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3954 	       result, len);
3955 
3956 	chan = l2cap_get_chan_by_scid(conn, scid);
3957 	if (!chan)
3958 		return 0;
3959 
3960 	switch (result) {
3961 	case L2CAP_CONF_SUCCESS:
3962 		l2cap_conf_rfc_get(chan, rsp->data, len);
3963 		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3964 		break;
3965 
3966 	case L2CAP_CONF_PENDING:
3967 		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3968 
3969 		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3970 			char buf[64];
3971 
3972 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3973 						   buf, &result);
3974 			if (len < 0) {
3975 				l2cap_send_disconn_req(chan, ECONNRESET);
3976 				goto done;
3977 			}
3978 
3979 			if (!chan->hs_hcon) {
3980 				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
3981 							0);
3982 			} else {
3983 				if (l2cap_check_efs(chan)) {
3984 					amp_create_logical_link(chan);
3985 					chan->ident = cmd->ident;
3986 				}
3987 			}
3988 		}
3989 		goto done;
3990 
3991 	case L2CAP_CONF_UNACCEPT:
3992 		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3993 			char req[64];
3994 
3995 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3996 				l2cap_send_disconn_req(chan, ECONNRESET);
3997 				goto done;
3998 			}
3999 
4000 			/* throw out any old stored conf requests */
4001 			result = L2CAP_CONF_SUCCESS;
4002 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4003 						   req, &result);
4004 			if (len < 0) {
4005 				l2cap_send_disconn_req(chan, ECONNRESET);
4006 				goto done;
4007 			}
4008 
4009 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
4010 				       L2CAP_CONF_REQ, len, req);
4011 			chan->num_conf_req++;
4012 			if (result != L2CAP_CONF_SUCCESS)
4013 				goto done;
4014 			break;
4015 		}
4016 
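	/* Too many unaccepted responses; fall through to the failure
	 * path and disconnect the channel.
	 */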
4017 	default:
4018 		l2cap_chan_set_err(chan, ECONNRESET);
4019 
4020 		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4021 		l2cap_send_disconn_req(chan, ECONNRESET);
4022 		goto done;
4023 	}
4024 
4025 	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4026 		goto done;
4027 
4028 	set_bit(CONF_INPUT_DONE, &chan->conf_state);
4029 
4030 	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4031 		set_default_fcs(chan);
4032 
4033 		if (chan->mode == L2CAP_MODE_ERTM ||
4034 		    chan->mode == L2CAP_MODE_STREAMING)
4035 			err = l2cap_ertm_init(chan);
4036 
4037 		if (err < 0)
4038 			l2cap_send_disconn_req(chan, -err);
4039 		else
4040 			l2cap_chan_ready(chan);
4041 	}
4042 
4043 done:
4044 	l2cap_chan_unlock(chan);
4045 	return err;
4046 }
4047 
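/* Handle an incoming Disconnect Request: find the local channel addressed
 * by the request's DCID, acknowledge with a Disconnect Response, shut the
 * socket down and tear the channel down with ECONNRESET.
 */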
4048 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4049 				       struct l2cap_cmd_hdr *cmd, u8 *data)
4050 {
4051 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4052 	struct l2cap_disconn_rsp rsp;
4053 	u16 dcid, scid;
4054 	struct l2cap_chan *chan;
4055 	struct sock *sk;
4056 
4057 	scid = __le16_to_cpu(req->scid);
4058 	dcid = __le16_to_cpu(req->dcid);
4059 
4060 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4061 
4062 	mutex_lock(&conn->chan_lock);
4063 
4064 	chan = __l2cap_get_chan_by_scid(conn, dcid);
4065 	if (!chan) {
4066 		mutex_unlock(&conn->chan_lock);
4067 		return 0;
4068 	}
4069 
4070 	l2cap_chan_lock(chan);
4071 
4072 	sk = chan->sk;
4073 
4074 	rsp.dcid = cpu_to_le16(chan->scid);
4075 	rsp.scid = cpu_to_le16(chan->dcid);
4076 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4077 
4078 	lock_sock(sk);
4079 	sk->sk_shutdown = SHUTDOWN_MASK;
4080 	release_sock(sk);
4081 
4082 	l2cap_chan_hold(chan);
4083 	l2cap_chan_del(chan, ECONNRESET);
4084 
4085 	l2cap_chan_unlock(chan);
4086 
4087 	chan->ops->close(chan);
4088 	l2cap_chan_put(chan);
4089 
4090 	mutex_unlock(&conn->chan_lock);
4091 
4092 	return 0;
4093 }
4094 
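/* Handle a Disconnect Response: the peer has acknowledged our Disconnect
 * Request, so the matching channel is removed without an error status.
 */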
4095 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4096 				       struct l2cap_cmd_hdr *cmd, u8 *data)
4097 {
4098 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4099 	u16 dcid, scid;
4100 	struct l2cap_chan *chan;
4101 
4102 	scid = __le16_to_cpu(rsp->scid);
4103 	dcid = __le16_to_cpu(rsp->dcid);
4104 
4105 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4106 
4107 	mutex_lock(&conn->chan_lock);
4108 
4109 	chan = __l2cap_get_chan_by_scid(conn, scid);
4110 	if (!chan) {
4111 		mutex_unlock(&conn->chan_lock);
4112 		return 0;
4113 	}
4114 
4115 	l2cap_chan_lock(chan);
4116 
4117 	l2cap_chan_hold(chan);
4118 	l2cap_chan_del(chan, 0);
4119 
4120 	l2cap_chan_unlock(chan);
4121 
4122 	chan->ops->close(chan);
4123 	l2cap_chan_put(chan);
4124 
4125 	mutex_unlock(&conn->chan_lock);
4126 
4127 	return 0;
4128 }
4129 
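/* Answer an Information Request for the feature mask or the fixed channel
 * map; any other info type gets a "not supported" response.
 */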
4130 static inline int l2cap_information_req(struct l2cap_conn *conn,
4131 					struct l2cap_cmd_hdr *cmd, u8 *data)
4132 {
4133 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4134 	u16 type;
4135 
4136 	type = __le16_to_cpu(req->type);
4137 
4138 	BT_DBG("type 0x%4.4x", type);
4139 
4140 	if (type == L2CAP_IT_FEAT_MASK) {
4141 		u8 buf[8];
4142 		u32 feat_mask = l2cap_feat_mask;
4143 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4144 		rsp->type   = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
4145 		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4146 		if (!disable_ertm)
4147 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4148 				| L2CAP_FEAT_FCS;
4149 		if (enable_hs)
4150 			feat_mask |= L2CAP_FEAT_EXT_FLOW
4151 				| L2CAP_FEAT_EXT_WINDOW;
4152 
4153 		put_unaligned_le32(feat_mask, rsp->data);
4154 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4155 			       buf);
4156 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4157 		u8 buf[12];
4158 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4159 
4160 		if (enable_hs)
4161 			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4162 		else
4163 			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4164 
4165 		rsp->type   = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4166 		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
4167 		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4168 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4169 			       buf);
4170 	} else {
4171 		struct l2cap_info_rsp rsp;
4172 		rsp.type   = cpu_to_le16(type);
4173 		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
4174 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4175 			       &rsp);
4176 	}
4177 
4178 	return 0;
4179 }
4180 
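/* Process an Information Response: cancel the info timer, record the remote
 * feature mask or fixed channel map (requesting the latter if the peer
 * advertises fixed channel support) and resume pending connection setup.
 */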
4181 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4182 					struct l2cap_cmd_hdr *cmd, u8 *data)
4183 {
4184 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4185 	u16 type, result;
4186 
4187 	type   = __le16_to_cpu(rsp->type);
4188 	result = __le16_to_cpu(rsp->result);
4189 
4190 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4191 
4192 	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
4193 	if (cmd->ident != conn->info_ident ||
4194 	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4195 		return 0;
4196 
4197 	cancel_delayed_work(&conn->info_timer);
4198 
4199 	if (result != L2CAP_IR_SUCCESS) {
4200 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4201 		conn->info_ident = 0;
4202 
4203 		l2cap_conn_start(conn);
4204 
4205 		return 0;
4206 	}
4207 
4208 	switch (type) {
4209 	case L2CAP_IT_FEAT_MASK:
4210 		conn->feat_mask = get_unaligned_le32(rsp->data);
4211 
4212 		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4213 			struct l2cap_info_req req;
4214 			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4215 
4216 			conn->info_ident = l2cap_get_ident(conn);
4217 
4218 			l2cap_send_cmd(conn, conn->info_ident,
4219 				       L2CAP_INFO_REQ, sizeof(req), &req);
4220 		} else {
4221 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4222 			conn->info_ident = 0;
4223 
4224 			l2cap_conn_start(conn);
4225 		}
4226 		break;
4227 
4228 	case L2CAP_IT_FIXED_CHAN:
4229 		conn->fixed_chan_mask = rsp->data[0];
4230 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4231 		conn->info_ident = 0;
4232 
4233 		l2cap_conn_start(conn);
4234 		break;
4235 	}
4236 
4237 	return 0;
4238 }
4239 
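/* Handle a Create Channel Request.  Controller id 0 is treated as an
 * ordinary BR/EDR connect; any other id must name a powered-up AMP
 * controller, and the resulting channel is tied to the high-speed link.
 */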
4240 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4241 				    struct l2cap_cmd_hdr *cmd,
4242 				    u16 cmd_len, void *data)
4243 {
4244 	struct l2cap_create_chan_req *req = data;
4245 	struct l2cap_create_chan_rsp rsp;
4246 	struct l2cap_chan *chan;
4247 	struct hci_dev *hdev;
4248 	u16 psm, scid;
4249 
4250 	if (cmd_len != sizeof(*req))
4251 		return -EPROTO;
4252 
4253 	if (!enable_hs)
4254 		return -EINVAL;
4255 
4256 	psm = le16_to_cpu(req->psm);
4257 	scid = le16_to_cpu(req->scid);
4258 
4259 	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4260 
4261 	/* For controller id 0 make BR/EDR connection */
4262 	if (req->amp_id == HCI_BREDR_ID) {
4263 		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4264 			      req->amp_id);
4265 		return 0;
4266 	}
4267 
4268 	/* Validate AMP controller id */
4269 	hdev = hci_dev_get(req->amp_id);
4270 	if (!hdev)
4271 		goto error;
4272 
4273 	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4274 		hci_dev_put(hdev);
4275 		goto error;
4276 	}
4277 
4278 	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4279 			     req->amp_id);
4280 	if (chan) {
4281 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
4282 		struct hci_conn *hs_hcon;
4283 
4284 		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, conn->dst);
4285 		if (!hs_hcon) {
4286 			hci_dev_put(hdev);
4287 			return -EFAULT;
4288 		}
4289 
4290 		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4291 
4292 		mgr->bredr_chan = chan;
4293 		chan->hs_hcon = hs_hcon;
4294 		chan->fcs = L2CAP_FCS_NONE;
4295 		conn->mtu = hdev->block_mtu;
4296 	}
4297 
4298 	hci_dev_put(hdev);
4299 
4300 	return 0;
4301 
4302 error:
4303 	rsp.dcid = 0;
4304 	rsp.scid = cpu_to_le16(scid);
4305 	rsp.result = __constant_cpu_to_le16(L2CAP_CR_BAD_AMP);
4306 	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4307 
4308 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4309 		       sizeof(rsp), &rsp);
4310 
4311 	return -EFAULT;
4312 }
4313 
4314 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4315 {
4316 	struct l2cap_move_chan_req req;
4317 	u8 ident;
4318 
4319 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4320 
4321 	ident = l2cap_get_ident(chan->conn);
4322 	chan->ident = ident;
4323 
4324 	req.icid = cpu_to_le16(chan->scid);
4325 	req.dest_amp_id = dest_amp_id;
4326 
4327 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4328 		       &req);
4329 
4330 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4331 }
4332 
4333 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4334 {
4335 	struct l2cap_move_chan_rsp rsp;
4336 
4337 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4338 
4339 	rsp.icid = cpu_to_le16(chan->dcid);
4340 	rsp.result = cpu_to_le16(result);
4341 
4342 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4343 		       sizeof(rsp), &rsp);
4344 }
4345 
4346 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4347 {
4348 	struct l2cap_move_chan_cfm cfm;
4349 
4350 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4351 
4352 	chan->ident = l2cap_get_ident(chan->conn);
4353 
4354 	cfm.icid = cpu_to_le16(chan->scid);
4355 	cfm.result = cpu_to_le16(result);
4356 
4357 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4358 		       sizeof(cfm), &cfm);
4359 
4360 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4361 }
4362 
4363 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4364 {
4365 	struct l2cap_move_chan_cfm cfm;
4366 
4367 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4368 
4369 	cfm.icid = cpu_to_le16(icid);
4370 	cfm.result = __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4371 
4372 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4373 		       sizeof(cfm), &cfm);
4374 }
4375 
4376 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4377 					 u16 icid)
4378 {
4379 	struct l2cap_move_chan_cfm_rsp rsp;
4380 
4381 	BT_DBG("icid 0x%4.4x", icid);
4382 
4383 	rsp.icid = cpu_to_le16(icid);
4384 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4385 }
4386 
4387 static void __release_logical_link(struct l2cap_chan *chan)
4388 {
4389 	chan->hs_hchan = NULL;
4390 	chan->hs_hcon = NULL;
4391 
4392 	/* Placeholder - release the logical link */
4393 }
4394 
4395 static void l2cap_logical_fail(struct l2cap_chan *chan)
4396 {
4397 	/* Logical link setup failed */
4398 	if (chan->state != BT_CONNECTED) {
4399 		/* Create channel failure, disconnect */
4400 		l2cap_send_disconn_req(chan, ECONNRESET);
4401 		return;
4402 	}
4403 
4404 	switch (chan->move_role) {
4405 	case L2CAP_MOVE_ROLE_RESPONDER:
4406 		l2cap_move_done(chan);
4407 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4408 		break;
4409 	case L2CAP_MOVE_ROLE_INITIATOR:
4410 		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4411 		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4412 			/* Remote has only sent pending or
4413 			 * success responses, clean up
4414 			 */
4415 			l2cap_move_done(chan);
4416 		}
4417 
4418 		/* Other AMP move states imply that the move
4419 		 * has already been aborted
4420 		 */
4421 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4422 		break;
4423 	}
4424 }
4425 
4426 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4427 					struct hci_chan *hchan)
4428 {
4429 	struct l2cap_conf_rsp rsp;
4430 
4431 	chan->hs_hchan = hchan;
4432 	chan->hs_hcon->l2cap_data = chan->conn;
4433 
4434 	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4435 
4436 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4437 		int err;
4438 
4439 		set_default_fcs(chan);
4440 
4441 		err = l2cap_ertm_init(chan);
4442 		if (err < 0)
4443 			l2cap_send_disconn_req(chan, -err);
4444 		else
4445 			l2cap_chan_ready(chan);
4446 	}
4447 }
4448 
4449 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4450 				      struct hci_chan *hchan)
4451 {
4452 	chan->hs_hcon = hchan->conn;
4453 	chan->hs_hcon->l2cap_data = chan->conn;
4454 
4455 	BT_DBG("move_state %d", chan->move_state);
4456 
4457 	switch (chan->move_state) {
4458 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4459 		/* Move confirm will be sent after a success
4460 		 * response is received
4461 		 */
4462 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4463 		break;
4464 	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4465 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4466 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4467 		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4468 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4469 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4470 		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4471 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4472 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4473 		}
4474 		break;
4475 	default:
4476 		/* Move was not in an expected state, release the logical link */
4477 		__release_logical_link(chan);
4478 
4479 		chan->move_state = L2CAP_MOVE_STABLE;
4480 	}
4481 }
4482 
4483 /* Call with chan locked */
4484 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4485 		       u8 status)
4486 {
4487 	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4488 
4489 	if (status) {
4490 		l2cap_logical_fail(chan);
4491 		__release_logical_link(chan);
4492 		return;
4493 	}
4494 
4495 	if (chan->state != BT_CONNECTED) {
4496 		/* Ignore logical link if channel is on BR/EDR */
4497 		if (chan->local_amp_id)
4498 			l2cap_logical_finish_create(chan, hchan);
4499 	} else {
4500 		l2cap_logical_finish_move(chan, hchan);
4501 	}
4502 }
4503 
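/* Start moving a channel to its preferred controller.  A channel on
 * BR/EDR only moves when the AMP-preferred policy is set and must wait
 * for a physical AMP link first; a channel already on an AMP is moved
 * straight back to BR/EDR (controller id 0).
 */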
4504 void l2cap_move_start(struct l2cap_chan *chan)
4505 {
4506 	BT_DBG("chan %p", chan);
4507 
4508 	if (chan->local_amp_id == HCI_BREDR_ID) {
4509 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4510 			return;
4511 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4512 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4513 		/* Placeholder - start physical link setup */
4514 	} else {
4515 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4516 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4517 		chan->move_id = 0;
4518 		l2cap_move_setup(chan);
4519 		l2cap_send_move_chan_req(chan, 0);
4520 	}
4521 }
4522 
4523 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4524 			    u8 local_amp_id, u8 remote_amp_id)
4525 {
4526 	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4527 	       local_amp_id, remote_amp_id);
4528 
4529 	chan->fcs = L2CAP_FCS_NONE;
4530 
4531 	/* Outgoing channel on AMP */
4532 	if (chan->state == BT_CONNECT) {
4533 		if (result == L2CAP_CR_SUCCESS) {
4534 			chan->local_amp_id = local_amp_id;
4535 			l2cap_send_create_chan_req(chan, remote_amp_id);
4536 		} else {
4537 			/* Revert to BR/EDR connect */
4538 			l2cap_send_conn_req(chan);
4539 		}
4540 
4541 		return;
4542 	}
4543 
4544 	/* Incoming channel on AMP */
4545 	if (__l2cap_no_conn_pending(chan)) {
4546 		struct l2cap_conn_rsp rsp;
4547 		char buf[128];
4548 		rsp.scid = cpu_to_le16(chan->dcid);
4549 		rsp.dcid = cpu_to_le16(chan->scid);
4550 
4551 		if (result == L2CAP_CR_SUCCESS) {
4552 			/* Send successful response */
4553 			rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
4554 			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4555 		} else {
4556 			/* Send negative response */
4557 			rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4558 			rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4559 		}
4560 
4561 		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4562 			       sizeof(rsp), &rsp);
4563 
4564 		if (result == L2CAP_CR_SUCCESS) {
4565 			__l2cap_state_change(chan, BT_CONFIG);
4566 			set_bit(CONF_REQ_SENT, &chan->conf_state);
4567 			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4568 				       L2CAP_CONF_REQ,
4569 				       l2cap_build_conf_req(chan, buf), buf);
4570 			chan->num_conf_req++;
4571 		}
4572 	}
4573 }
4574 
4575 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4576 				   u8 remote_amp_id)
4577 {
4578 	l2cap_move_setup(chan);
4579 	chan->move_id = local_amp_id;
4580 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
4581 
4582 	l2cap_send_move_chan_req(chan, remote_amp_id);
4583 }
4584 
4585 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4586 {
4587 	struct hci_chan *hchan = NULL;
4588 
4589 	/* Placeholder - get hci_chan for logical link */
4590 
4591 	if (hchan) {
4592 		if (hchan->state == BT_CONNECTED) {
4593 			/* Logical link is ready to go */
4594 			chan->hs_hcon = hchan->conn;
4595 			chan->hs_hcon->l2cap_data = chan->conn;
4596 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4597 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4598 
4599 			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4600 		} else {
4601 			/* Wait for logical link to be ready */
4602 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4603 		}
4604 	} else {
4605 		/* Logical link not available */
4606 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4607 	}
4608 }
4609 
4610 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4611 {
4612 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4613 		u8 rsp_result;
4614 		if (result == -EINVAL)
4615 			rsp_result = L2CAP_MR_BAD_ID;
4616 		else
4617 			rsp_result = L2CAP_MR_NOT_ALLOWED;
4618 
4619 		l2cap_send_move_chan_rsp(chan, rsp_result);
4620 	}
4621 
4622 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
4623 	chan->move_state = L2CAP_MOVE_STABLE;
4624 
4625 	/* Restart data transmission */
4626 	l2cap_ertm_send(chan);
4627 }
4628 
4629 /* Call with chan locked */
4630 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4631 {
4632 	u8 local_amp_id = chan->local_amp_id;
4633 	u8 remote_amp_id = chan->remote_amp_id;
4634 
4635 	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4636 	       chan, result, local_amp_id, remote_amp_id);
4637 
4638 	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4639 		l2cap_chan_unlock(chan);
4640 		return;
4641 	}
4642 
4643 	if (chan->state != BT_CONNECTED) {
4644 		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4645 	} else if (result != L2CAP_MR_SUCCESS) {
4646 		l2cap_do_move_cancel(chan, result);
4647 	} else {
4648 		switch (chan->move_role) {
4649 		case L2CAP_MOVE_ROLE_INITIATOR:
4650 			l2cap_do_move_initiate(chan, local_amp_id,
4651 					       remote_amp_id);
4652 			break;
4653 		case L2CAP_MOVE_ROLE_RESPONDER:
4654 			l2cap_do_move_respond(chan, result);
4655 			break;
4656 		default:
4657 			l2cap_do_move_cancel(chan, result);
4658 			break;
4659 		}
4660 	}
4661 }
4662 
4663 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4664 					 struct l2cap_cmd_hdr *cmd,
4665 					 u16 cmd_len, void *data)
4666 {
4667 	struct l2cap_move_chan_req *req = data;
4668 	struct l2cap_move_chan_rsp rsp;
4669 	struct l2cap_chan *chan;
4670 	u16 icid = 0;
4671 	u16 result = L2CAP_MR_NOT_ALLOWED;
4672 
4673 	if (cmd_len != sizeof(*req))
4674 		return -EPROTO;
4675 
4676 	icid = le16_to_cpu(req->icid);
4677 
4678 	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4679 
4680 	if (!enable_hs)
4681 		return -EINVAL;
4682 
4683 	chan = l2cap_get_chan_by_dcid(conn, icid);
4684 	if (!chan) {
4685 		rsp.icid = cpu_to_le16(icid);
4686 		rsp.result = __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4687 		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4688 			       sizeof(rsp), &rsp);
4689 		return 0;
4690 	}
4691 
4692 	chan->ident = cmd->ident;
4693 
4694 	if (chan->scid < L2CAP_CID_DYN_START ||
4695 	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4696 	    (chan->mode != L2CAP_MODE_ERTM &&
4697 	     chan->mode != L2CAP_MODE_STREAMING)) {
4698 		result = L2CAP_MR_NOT_ALLOWED;
4699 		goto send_move_response;
4700 	}
4701 
4702 	if (chan->local_amp_id == req->dest_amp_id) {
4703 		result = L2CAP_MR_SAME_ID;
4704 		goto send_move_response;
4705 	}
4706 
4707 	if (req->dest_amp_id) {
4708 		struct hci_dev *hdev;
4709 		hdev = hci_dev_get(req->dest_amp_id);
4710 		if (!hdev || hdev->dev_type != HCI_AMP ||
4711 		    !test_bit(HCI_UP, &hdev->flags)) {
4712 			if (hdev)
4713 				hci_dev_put(hdev);
4714 
4715 			result = L2CAP_MR_BAD_ID;
4716 			goto send_move_response;
4717 		}
4718 		hci_dev_put(hdev);
4719 	}
4720 
4721 	/* Detect a move collision.  Only send a collision response
4722 	 * if this side has "lost", otherwise proceed with the move.
4723 	 * The winner has the larger bd_addr.
4724 	 */
4725 	if ((__chan_is_moving(chan) ||
4726 	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4727 	    bacmp(conn->src, conn->dst) > 0) {
4728 		result = L2CAP_MR_COLLISION;
4729 		goto send_move_response;
4730 	}
4731 
4732 	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4733 	l2cap_move_setup(chan);
4734 	chan->move_id = req->dest_amp_id;
4735 	icid = chan->dcid;
4736 
4737 	if (!req->dest_amp_id) {
4738 		/* Moving to BR/EDR */
4739 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4740 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4741 			result = L2CAP_MR_PEND;
4742 		} else {
4743 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4744 			result = L2CAP_MR_SUCCESS;
4745 		}
4746 	} else {
4747 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4748 		/* Placeholder - uncomment when amp functions are available */
4749 		/*amp_accept_physical(chan, req->dest_amp_id);*/
4750 		result = L2CAP_MR_PEND;
4751 	}
4752 
4753 send_move_response:
4754 	l2cap_send_move_chan_rsp(chan, result);
4755 
4756 	l2cap_chan_unlock(chan);
4757 
4758 	return 0;
4759 }
4760 
4761 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4762 {
4763 	struct l2cap_chan *chan;
4764 	struct hci_chan *hchan = NULL;
4765 
4766 	chan = l2cap_get_chan_by_scid(conn, icid);
4767 	if (!chan) {
4768 		l2cap_send_move_chan_cfm_icid(conn, icid);
4769 		return;
4770 	}
4771 
4772 	__clear_chan_timer(chan);
4773 	if (result == L2CAP_MR_PEND)
4774 		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4775 
4776 	switch (chan->move_state) {
4777 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4778 		/* Move confirm will be sent when logical link
4779 		 * is complete.
4780 		 */
4781 		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4782 		break;
4783 	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4784 		if (result == L2CAP_MR_PEND) {
4785 			break;
4786 		} else if (test_bit(CONN_LOCAL_BUSY,
4787 				    &chan->conn_state)) {
4788 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4789 		} else {
4790 			/* Logical link is up or moving to BR/EDR,
4791 			 * proceed with move
4792 			 */
4793 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4794 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4795 		}
4796 		break;
4797 	case L2CAP_MOVE_WAIT_RSP:
4798 		/* Moving to AMP */
4799 		if (result == L2CAP_MR_SUCCESS) {
4800 			/* Remote is ready, send confirm immediately
4801 			 * after logical link is ready
4802 			 */
4803 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4804 		} else {
4805 			/* Both logical link and move success
4806 			 * are required to confirm
4807 			 */
4808 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
4809 		}
4810 
4811 		/* Placeholder - get hci_chan for logical link */
4812 		if (!hchan) {
4813 			/* Logical link not available */
4814 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4815 			break;
4816 		}
4817 
4818 		/* If the logical link is not yet connected, do not
4819 		 * send confirmation.
4820 		 */
4821 		if (hchan->state != BT_CONNECTED)
4822 			break;
4823 
4824 		/* Logical link is already ready to go */
4825 
4826 		chan->hs_hcon = hchan->conn;
4827 		chan->hs_hcon->l2cap_data = chan->conn;
4828 
4829 		if (result == L2CAP_MR_SUCCESS) {
4830 			/* Can confirm now */
4831 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4832 		} else {
4833 			/* Now only need move success
4834 			 * to confirm
4835 			 */
4836 			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4837 		}
4838 
4839 		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4840 		break;
4841 	default:
4842 		/* Any other amp move state means the move failed. */
4843 		chan->move_id = chan->local_amp_id;
4844 		l2cap_move_done(chan);
4845 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4846 	}
4847 
4848 	l2cap_chan_unlock(chan);
4849 }
4850 
4851 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
4852 			    u16 result)
4853 {
4854 	struct l2cap_chan *chan;
4855 
4856 	chan = l2cap_get_chan_by_ident(conn, ident);
4857 	if (!chan) {
4858 		/* Could not locate channel; icid is best guess */
4859 		l2cap_send_move_chan_cfm_icid(conn, icid);
4860 		return;
4861 	}
4862 
4863 	__clear_chan_timer(chan);
4864 
4865 	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4866 		if (result == L2CAP_MR_COLLISION) {
4867 			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4868 		} else {
4869 			/* Cleanup - cancel move */
4870 			chan->move_id = chan->local_amp_id;
4871 			l2cap_move_done(chan);
4872 		}
4873 	}
4874 
4875 	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4876 
4877 	l2cap_chan_unlock(chan);
4878 }
4879 
4880 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4881 				  struct l2cap_cmd_hdr *cmd,
4882 				  u16 cmd_len, void *data)
4883 {
4884 	struct l2cap_move_chan_rsp *rsp = data;
4885 	u16 icid, result;
4886 
4887 	if (cmd_len != sizeof(*rsp))
4888 		return -EPROTO;
4889 
4890 	icid = le16_to_cpu(rsp->icid);
4891 	result = le16_to_cpu(rsp->result);
4892 
4893 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4894 
4895 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
4896 		l2cap_move_continue(conn, icid, result);
4897 	else
4898 		l2cap_move_fail(conn, cmd->ident, icid, result);
4899 
4900 	return 0;
4901 }
4902 
4903 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4904 				      struct l2cap_cmd_hdr *cmd,
4905 				      u16 cmd_len, void *data)
4906 {
4907 	struct l2cap_move_chan_cfm *cfm = data;
4908 	struct l2cap_chan *chan;
4909 	u16 icid, result;
4910 
4911 	if (cmd_len != sizeof(*cfm))
4912 		return -EPROTO;
4913 
4914 	icid = le16_to_cpu(cfm->icid);
4915 	result = le16_to_cpu(cfm->result);
4916 
4917 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4918 
4919 	chan = l2cap_get_chan_by_dcid(conn, icid);
4920 	if (!chan) {
4921 		/* Spec requires a response even if the icid was not found */
4922 		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4923 		return 0;
4924 	}
4925 
4926 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
4927 		if (result == L2CAP_MC_CONFIRMED) {
4928 			chan->local_amp_id = chan->move_id;
4929 			if (!chan->local_amp_id)
4930 				__release_logical_link(chan);
4931 		} else {
4932 			chan->move_id = chan->local_amp_id;
4933 		}
4934 
4935 		l2cap_move_done(chan);
4936 	}
4937 
4938 	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4939 
4940 	l2cap_chan_unlock(chan);
4941 
4942 	return 0;
4943 }
4944 
4945 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4946 						 struct l2cap_cmd_hdr *cmd,
4947 						 u16 cmd_len, void *data)
4948 {
4949 	struct l2cap_move_chan_cfm_rsp *rsp = data;
4950 	struct l2cap_chan *chan;
4951 	u16 icid;
4952 
4953 	if (cmd_len != sizeof(*rsp))
4954 		return -EPROTO;
4955 
4956 	icid = le16_to_cpu(rsp->icid);
4957 
4958 	BT_DBG("icid 0x%4.4x", icid);
4959 
4960 	chan = l2cap_get_chan_by_scid(conn, icid);
4961 	if (!chan)
4962 		return 0;
4963 
4964 	__clear_chan_timer(chan);
4965 
4966 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
4967 		chan->local_amp_id = chan->move_id;
4968 
4969 		if (!chan->local_amp_id && chan->hs_hchan)
4970 			__release_logical_link(chan);
4971 
4972 		l2cap_move_done(chan);
4973 	}
4974 
4975 	l2cap_chan_unlock(chan);
4976 
4977 	return 0;
4978 }
4979 
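/* Validate LE connection parameter update values: connection interval
 * min/max of 6-3200 (units of 1.25 ms), supervision timeout of 10-3200
 * (units of 10 ms), and a slave latency of at most 499 that still lets
 * (latency + 1) intervals fit inside the supervision timeout.  For
 * example, min 24, max 40 (30-50 ms), latency 0 and timeout 42 (420 ms)
 * passes; raising the latency above 7 with those values would not.
 */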
4980 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4981 					 u16 to_multiplier)
4982 {
4983 	u16 max_latency;
4984 
4985 	if (min > max || min < 6 || max > 3200)
4986 		return -EINVAL;
4987 
4988 	if (to_multiplier < 10 || to_multiplier > 3200)
4989 		return -EINVAL;
4990 
4991 	if (max >= to_multiplier * 8)
4992 		return -EINVAL;
4993 
4994 	max_latency = (to_multiplier * 8 / max) - 1;
4995 	if (latency > 499 || latency > max_latency)
4996 		return -EINVAL;
4997 
4998 	return 0;
4999 }
5000 
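/* Handle an LE Connection Parameter Update Request.  Only accepted when we
 * are the master; the parameters are validated, the verdict is returned in
 * the response and, on success, passed down to the controller.
 */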
5001 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5002 					      struct l2cap_cmd_hdr *cmd,
5003 					      u8 *data)
5004 {
5005 	struct hci_conn *hcon = conn->hcon;
5006 	struct l2cap_conn_param_update_req *req;
5007 	struct l2cap_conn_param_update_rsp rsp;
5008 	u16 min, max, latency, to_multiplier, cmd_len;
5009 	int err;
5010 
5011 	if (!(hcon->link_mode & HCI_LM_MASTER))
5012 		return -EINVAL;
5013 
5014 	cmd_len = __le16_to_cpu(cmd->len);
5015 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5016 		return -EPROTO;
5017 
5018 	req = (struct l2cap_conn_param_update_req *) data;
5019 	min		= __le16_to_cpu(req->min);
5020 	max		= __le16_to_cpu(req->max);
5021 	latency		= __le16_to_cpu(req->latency);
5022 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
5023 
5024 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5025 	       min, max, latency, to_multiplier);
5026 
5027 	memset(&rsp, 0, sizeof(rsp));
5028 
5029 	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5030 	if (err)
5031 		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5032 	else
5033 		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5034 
5035 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5036 		       sizeof(rsp), &rsp);
5037 
5038 	if (!err)
5039 		hci_le_conn_update(hcon, min, max, latency, to_multiplier);
5040 
5041 	return 0;
5042 }
5043 
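/* Dispatch a single BR/EDR signalling command to its handler. */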
5044 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5045 				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5046 				      u8 *data)
5047 {
5048 	int err = 0;
5049 
5050 	switch (cmd->code) {
5051 	case L2CAP_COMMAND_REJ:
5052 		l2cap_command_rej(conn, cmd, data);
5053 		break;
5054 
5055 	case L2CAP_CONN_REQ:
5056 		err = l2cap_connect_req(conn, cmd, data);
5057 		break;
5058 
5059 	case L2CAP_CONN_RSP:
5060 	case L2CAP_CREATE_CHAN_RSP:
5061 		err = l2cap_connect_create_rsp(conn, cmd, data);
5062 		break;
5063 
5064 	case L2CAP_CONF_REQ:
5065 		err = l2cap_config_req(conn, cmd, cmd_len, data);
5066 		break;
5067 
5068 	case L2CAP_CONF_RSP:
5069 		err = l2cap_config_rsp(conn, cmd, data);
5070 		break;
5071 
5072 	case L2CAP_DISCONN_REQ:
5073 		err = l2cap_disconnect_req(conn, cmd, data);
5074 		break;
5075 
5076 	case L2CAP_DISCONN_RSP:
5077 		err = l2cap_disconnect_rsp(conn, cmd, data);
5078 		break;
5079 
5080 	case L2CAP_ECHO_REQ:
5081 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5082 		break;
5083 
5084 	case L2CAP_ECHO_RSP:
5085 		break;
5086 
5087 	case L2CAP_INFO_REQ:
5088 		err = l2cap_information_req(conn, cmd, data);
5089 		break;
5090 
5091 	case L2CAP_INFO_RSP:
5092 		err = l2cap_information_rsp(conn, cmd, data);
5093 		break;
5094 
5095 	case L2CAP_CREATE_CHAN_REQ:
5096 		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5097 		break;
5098 
5099 	case L2CAP_MOVE_CHAN_REQ:
5100 		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5101 		break;
5102 
5103 	case L2CAP_MOVE_CHAN_RSP:
5104 		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5105 		break;
5106 
5107 	case L2CAP_MOVE_CHAN_CFM:
5108 		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5109 		break;
5110 
5111 	case L2CAP_MOVE_CHAN_CFM_RSP:
5112 		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5113 		break;
5114 
5115 	default:
5116 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5117 		err = -EINVAL;
5118 		break;
5119 	}
5120 
5121 	return err;
5122 }
5123 
5124 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5125 				   struct l2cap_cmd_hdr *cmd, u8 *data)
5126 {
5127 	switch (cmd->code) {
5128 	case L2CAP_COMMAND_REJ:
5129 		return 0;
5130 
5131 	case L2CAP_CONN_PARAM_UPDATE_REQ:
5132 		return l2cap_conn_param_update_req(conn, cmd, data);
5133 
5134 	case L2CAP_CONN_PARAM_UPDATE_RSP:
5135 		return 0;
5136 
5137 	default:
5138 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5139 		return -EINVAL;
5140 	}
5141 }
5142 
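/* Parse the signalling channel payload command by command, dispatching each
 * one to the BR/EDR or LE handler and answering failures with a Command
 * Reject.
 */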
5143 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5144 				     struct sk_buff *skb)
5145 {
5146 	u8 *data = skb->data;
5147 	int len = skb->len;
5148 	struct l2cap_cmd_hdr cmd;
5149 	int err;
5150 
5151 	l2cap_raw_recv(conn, skb);
5152 
5153 	while (len >= L2CAP_CMD_HDR_SIZE) {
5154 		u16 cmd_len;
5155 		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5156 		data += L2CAP_CMD_HDR_SIZE;
5157 		len  -= L2CAP_CMD_HDR_SIZE;
5158 
5159 		cmd_len = le16_to_cpu(cmd.len);
5160 
5161 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5162 		       cmd.ident);
5163 
5164 		if (cmd_len > len || !cmd.ident) {
5165 			BT_DBG("corrupted command");
5166 			break;
5167 		}
5168 
5169 		if (conn->hcon->type == LE_LINK)
5170 			err = l2cap_le_sig_cmd(conn, &cmd, data);
5171 		else
5172 			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5173 
5174 		if (err) {
5175 			struct l2cap_cmd_rej_unk rej;
5176 
5177 			BT_ERR("Wrong link type (%d)", err);
5178 
5179 			/* FIXME: Map err to a valid reason */
5180 			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5181 			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5182 				       sizeof(rej), &rej);
5183 		}
5184 
5185 		data += cmd_len;
5186 		len  -= cmd_len;
5187 	}
5188 
5189 	kfree_skb(skb);
5190 }
5191 
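/* Verify and strip the CRC16 FCS of an incoming frame on channels that use
 * it.  Returns -EBADMSG on a checksum mismatch, 0 otherwise.
 */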
5192 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5193 {
5194 	u16 our_fcs, rcv_fcs;
5195 	int hdr_size;
5196 
5197 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5198 		hdr_size = L2CAP_EXT_HDR_SIZE;
5199 	else
5200 		hdr_size = L2CAP_ENH_HDR_SIZE;
5201 
5202 	if (chan->fcs == L2CAP_FCS_CRC16) {
5203 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5204 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5205 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5206 
5207 		if (our_fcs != rcv_fcs)
5208 			return -EBADMSG;
5209 	}
5210 	return 0;
5211 }
5212 
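/* Send a frame carrying the F-bit in response to a poll: an RNR while
 * locally busy, otherwise pending I-frames are flushed and an RR is sent
 * if none of them ended up carrying the F-bit.
 */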
5213 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5214 {
5215 	struct l2cap_ctrl control;
5216 
5217 	BT_DBG("chan %p", chan);
5218 
5219 	memset(&control, 0, sizeof(control));
5220 	control.sframe = 1;
5221 	control.final = 1;
5222 	control.reqseq = chan->buffer_seq;
5223 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
5224 
5225 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5226 		control.super = L2CAP_SUPER_RNR;
5227 		l2cap_send_sframe(chan, &control);
5228 	}
5229 
5230 	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5231 	    chan->unacked_frames > 0)
5232 		__set_retrans_timer(chan);
5233 
5234 	/* Send pending iframes */
5235 	l2cap_ertm_send(chan);
5236 
5237 	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5238 	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5239 		/* F-bit wasn't sent in an s-frame or i-frame yet, so
5240 		 * send it now.
5241 		 */
5242 		control.super = L2CAP_SUPER_RR;
5243 		l2cap_send_sframe(chan, &control);
5244 	}
5245 }
5246 
5247 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5248 			    struct sk_buff **last_frag)
5249 {
5250 	/* skb->len reflects data in skb as well as all fragments
5251 	 * skb->data_len reflects only data in fragments
5252 	 */
5253 	if (!skb_has_frag_list(skb))
5254 		skb_shinfo(skb)->frag_list = new_frag;
5255 
5256 	new_frag->next = NULL;
5257 
5258 	(*last_frag)->next = new_frag;
5259 	*last_frag = new_frag;
5260 
5261 	skb->len += new_frag->len;
5262 	skb->data_len += new_frag->len;
5263 	skb->truesize += new_frag->truesize;
5264 }
5265 
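/* Reassemble an SDU from SAR-tagged frames.  Unsegmented frames are passed
 * up directly; START/CONTINUE/END fragments are chained on chan->sdu and
 * delivered once the announced SDU length has been received.
 */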
5266 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5267 				struct l2cap_ctrl *control)
5268 {
5269 	int err = -EINVAL;
5270 
5271 	switch (control->sar) {
5272 	case L2CAP_SAR_UNSEGMENTED:
5273 		if (chan->sdu)
5274 			break;
5275 
5276 		err = chan->ops->recv(chan, skb);
5277 		break;
5278 
5279 	case L2CAP_SAR_START:
5280 		if (chan->sdu)
5281 			break;
5282 
5283 		chan->sdu_len = get_unaligned_le16(skb->data);
5284 		skb_pull(skb, L2CAP_SDULEN_SIZE);
5285 
5286 		if (chan->sdu_len > chan->imtu) {
5287 			err = -EMSGSIZE;
5288 			break;
5289 		}
5290 
5291 		if (skb->len >= chan->sdu_len)
5292 			break;
5293 
5294 		chan->sdu = skb;
5295 		chan->sdu_last_frag = skb;
5296 
5297 		skb = NULL;
5298 		err = 0;
5299 		break;
5300 
5301 	case L2CAP_SAR_CONTINUE:
5302 		if (!chan->sdu)
5303 			break;
5304 
5305 		append_skb_frag(chan->sdu, skb,
5306 				&chan->sdu_last_frag);
5307 		skb = NULL;
5308 
5309 		if (chan->sdu->len >= chan->sdu_len)
5310 			break;
5311 
5312 		err = 0;
5313 		break;
5314 
5315 	case L2CAP_SAR_END:
5316 		if (!chan->sdu)
5317 			break;
5318 
5319 		append_skb_frag(chan->sdu, skb,
5320 				&chan->sdu_last_frag);
5321 		skb = NULL;
5322 
5323 		if (chan->sdu->len != chan->sdu_len)
5324 			break;
5325 
5326 		err = chan->ops->recv(chan, chan->sdu);
5327 
5328 		if (!err) {
5329 			/* Reassembly complete */
5330 			chan->sdu = NULL;
5331 			chan->sdu_last_frag = NULL;
5332 			chan->sdu_len = 0;
5333 		}
5334 		break;
5335 	}
5336 
5337 	if (err) {
5338 		kfree_skb(skb);
5339 		kfree_skb(chan->sdu);
5340 		chan->sdu = NULL;
5341 		chan->sdu_last_frag = NULL;
5342 		chan->sdu_len = 0;
5343 	}
5344 
5345 	return err;
5346 }
5347 
5348 static int l2cap_resegment(struct l2cap_chan *chan)
5349 {
5350 	/* Placeholder */
5351 	return 0;
5352 }
5353 
5354 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5355 {
5356 	u8 event;
5357 
5358 	if (chan->mode != L2CAP_MODE_ERTM)
5359 		return;
5360 
5361 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5362 	l2cap_tx(chan, NULL, NULL, event);
5363 }
5364 
5365 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5366 {
5367 	int err = 0;
5368 	/* Pass sequential frames to l2cap_reassemble_sdu()
5369 	 * until a gap is encountered.
5370 	 */
5371 
5372 	BT_DBG("chan %p", chan);
5373 
5374 	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5375 		struct sk_buff *skb;
5376 		BT_DBG("Searching for skb with txseq %d (queue len %d)",
5377 		       chan->buffer_seq, skb_queue_len(&chan->srej_q));
5378 
5379 		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5380 
5381 		if (!skb)
5382 			break;
5383 
5384 		skb_unlink(skb, &chan->srej_q);
5385 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5386 		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
5387 		if (err)
5388 			break;
5389 	}
5390 
5391 	if (skb_queue_empty(&chan->srej_q)) {
5392 		chan->rx_state = L2CAP_RX_STATE_RECV;
5393 		l2cap_send_ack(chan);
5394 	}
5395 
5396 	return err;
5397 }
5398 
5399 static void l2cap_handle_srej(struct l2cap_chan *chan,
5400 			      struct l2cap_ctrl *control)
5401 {
5402 	struct sk_buff *skb;
5403 
5404 	BT_DBG("chan %p, control %p", chan, control);
5405 
5406 	if (control->reqseq == chan->next_tx_seq) {
5407 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5408 		l2cap_send_disconn_req(chan, ECONNRESET);
5409 		return;
5410 	}
5411 
5412 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5413 
5414 	if (skb == NULL) {
5415 		BT_DBG("Seq %d not available for retransmission",
5416 		       control->reqseq);
5417 		return;
5418 	}
5419 
5420 	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5421 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5422 		l2cap_send_disconn_req(chan, ECONNRESET);
5423 		return;
5424 	}
5425 
5426 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5427 
5428 	if (control->poll) {
5429 		l2cap_pass_to_tx(chan, control);
5430 
5431 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
5432 		l2cap_retransmit(chan, control);
5433 		l2cap_ertm_send(chan);
5434 
5435 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5436 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
5437 			chan->srej_save_reqseq = control->reqseq;
5438 		}
5439 	} else {
5440 		l2cap_pass_to_tx_fbit(chan, control);
5441 
5442 		if (control->final) {
5443 			if (chan->srej_save_reqseq != control->reqseq ||
5444 			    !test_and_clear_bit(CONN_SREJ_ACT,
5445 						&chan->conn_state))
5446 				l2cap_retransmit(chan, control);
5447 		} else {
5448 			l2cap_retransmit(chan, control);
5449 			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5450 				set_bit(CONN_SREJ_ACT, &chan->conn_state);
5451 				chan->srej_save_reqseq = control->reqseq;
5452 			}
5453 		}
5454 	}
5455 }
5456 
5457 static void l2cap_handle_rej(struct l2cap_chan *chan,
5458 			     struct l2cap_ctrl *control)
5459 {
5460 	struct sk_buff *skb;
5461 
5462 	BT_DBG("chan %p, control %p", chan, control);
5463 
5464 	if (control->reqseq == chan->next_tx_seq) {
5465 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5466 		l2cap_send_disconn_req(chan, ECONNRESET);
5467 		return;
5468 	}
5469 
5470 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5471 
5472 	if (chan->max_tx && skb &&
5473 	    bt_cb(skb)->control.retries >= chan->max_tx) {
5474 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5475 		l2cap_send_disconn_req(chan, ECONNRESET);
5476 		return;
5477 	}
5478 
5479 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5480 
5481 	l2cap_pass_to_tx(chan, control);
5482 
5483 	if (control->final) {
5484 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5485 			l2cap_retransmit_all(chan, control);
5486 	} else {
5487 		l2cap_retransmit_all(chan, control);
5488 		l2cap_ertm_send(chan);
5489 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5490 			set_bit(CONN_REJ_ACT, &chan->conn_state);
5491 	}
5492 }
5493 
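/* Classify an incoming I-frame sequence number against the receive window
 * and any outstanding SREJ state so the RX state machine knows whether to
 * accept, queue, ignore or reject the frame.
 */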
5494 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5495 {
5496 	BT_DBG("chan %p, txseq %d", chan, txseq);
5497 
5498 	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5499 	       chan->expected_tx_seq);
5500 
5501 	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5502 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5503 		    chan->tx_win) {
5504 			/* See notes below regarding "double poll" and
5505 			 * invalid packets.
5506 			 */
5507 			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5508 				BT_DBG("Invalid/Ignore - after SREJ");
5509 				return L2CAP_TXSEQ_INVALID_IGNORE;
5510 			} else {
5511 				BT_DBG("Invalid - in window after SREJ sent");
5512 				return L2CAP_TXSEQ_INVALID;
5513 			}
5514 		}
5515 
5516 		if (chan->srej_list.head == txseq) {
5517 			BT_DBG("Expected SREJ");
5518 			return L2CAP_TXSEQ_EXPECTED_SREJ;
5519 		}
5520 
5521 		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
5522 			BT_DBG("Duplicate SREJ - txseq already stored");
5523 			return L2CAP_TXSEQ_DUPLICATE_SREJ;
5524 		}
5525 
5526 		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
5527 			BT_DBG("Unexpected SREJ - not requested");
5528 			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
5529 		}
5530 	}
5531 
5532 	if (chan->expected_tx_seq == txseq) {
5533 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
5534 		    chan->tx_win) {
5535 			BT_DBG("Invalid - txseq outside tx window");
5536 			return L2CAP_TXSEQ_INVALID;
5537 		} else {
5538 			BT_DBG("Expected");
5539 			return L2CAP_TXSEQ_EXPECTED;
5540 		}
5541 	}
5542 
5543 	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
5544 	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
5545 		BT_DBG("Duplicate - expected_tx_seq later than txseq");
5546 		return L2CAP_TXSEQ_DUPLICATE;
5547 	}
5548 
5549 	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
5550 		/* A source of invalid packets is a "double poll" condition,
5551 		 * where delays cause us to send multiple poll packets.  If
5552 		 * the remote stack receives and processes both polls,
5553 		 * sequence numbers can wrap around in such a way that a
5554 		 * resent frame has a sequence number that looks like new data
5555 		 * with a sequence gap.  This would trigger an erroneous SREJ
5556 		 * request.
5557 		 *
5558 		 * Fortunately, this is impossible with a tx window no larger
5559 		 * than half of the sequence number space, which allows
5560 		 * invalid frames to be safely ignored.
5561 		 *
5562 		 * With a tx window larger than half of the sequence number
5563 		 * space, the frame cannot be ignored, so the channel is
5564 		 * disconnected.
5565 		 */
5566 
5567 		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5568 			BT_DBG("Invalid/Ignore - txseq outside tx window");
5569 			return L2CAP_TXSEQ_INVALID_IGNORE;
5570 		} else {
5571 			BT_DBG("Invalid - txseq outside tx window");
5572 			return L2CAP_TXSEQ_INVALID;
5573 		}
5574 	} else {
5575 		BT_DBG("Unexpected - txseq indicates missing frames");
5576 		return L2CAP_TXSEQ_UNEXPECTED;
5577 	}
5578 }
5579 
5580 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
5581 			       struct l2cap_ctrl *control,
5582 			       struct sk_buff *skb, u8 event)
5583 {
5584 	int err = 0;
5585 	bool skb_in_use = false;
5586 
5587 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5588 	       event);
5589 
5590 	switch (event) {
5591 	case L2CAP_EV_RECV_IFRAME:
5592 		switch (l2cap_classify_txseq(chan, control->txseq)) {
5593 		case L2CAP_TXSEQ_EXPECTED:
5594 			l2cap_pass_to_tx(chan, control);
5595 
5596 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5597 				BT_DBG("Busy, discarding expected seq %d",
5598 				       control->txseq);
5599 				break;
5600 			}
5601 
5602 			chan->expected_tx_seq = __next_seq(chan,
5603 							   control->txseq);
5604 
5605 			chan->buffer_seq = chan->expected_tx_seq;
5606 			skb_in_use = true;
5607 
5608 			err = l2cap_reassemble_sdu(chan, skb, control);
5609 			if (err)
5610 				break;
5611 
5612 			if (control->final) {
5613 				if (!test_and_clear_bit(CONN_REJ_ACT,
5614 							&chan->conn_state)) {
5615 					control->final = 0;
5616 					l2cap_retransmit_all(chan, control);
5617 					l2cap_ertm_send(chan);
5618 				}
5619 			}
5620 
5621 			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
5622 				l2cap_send_ack(chan);
5623 			break;
5624 		case L2CAP_TXSEQ_UNEXPECTED:
5625 			l2cap_pass_to_tx(chan, control);
5626 
5627 			/* Can't issue SREJ frames in the local busy state.
5628 			 * Drop this frame; it will be seen as missing
5629 			 * when local busy is exited.
5630 			 */
5631 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5632 				BT_DBG("Busy, discarding unexpected seq %d",
5633 				       control->txseq);
5634 				break;
5635 			}
5636 
5637 			/* There was a gap in the sequence, so an SREJ
5638 			 * must be sent for each missing frame.  The
5639 			 * current frame is stored for later use.
5640 			 */
5641 			skb_queue_tail(&chan->srej_q, skb);
5642 			skb_in_use = true;
5643 			BT_DBG("Queued %p (queue len %d)", skb,
5644 			       skb_queue_len(&chan->srej_q));
5645 
5646 			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
5647 			l2cap_seq_list_clear(&chan->srej_list);
5648 			l2cap_send_srej(chan, control->txseq);
5649 
5650 			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
5651 			break;
5652 		case L2CAP_TXSEQ_DUPLICATE:
5653 			l2cap_pass_to_tx(chan, control);
5654 			break;
5655 		case L2CAP_TXSEQ_INVALID_IGNORE:
5656 			break;
5657 		case L2CAP_TXSEQ_INVALID:
5658 		default:
5659 			l2cap_send_disconn_req(chan, ECONNRESET);
5660 			break;
5661 		}
5662 		break;
5663 	case L2CAP_EV_RECV_RR:
5664 		l2cap_pass_to_tx(chan, control);
5665 		if (control->final) {
5666 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5667 
5668 			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
5669 			    !__chan_is_moving(chan)) {
5670 				control->final = 0;
5671 				l2cap_retransmit_all(chan, control);
5672 			}
5673 
5674 			l2cap_ertm_send(chan);
5675 		} else if (control->poll) {
5676 			l2cap_send_i_or_rr_or_rnr(chan);
5677 		} else {
5678 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
5679 					       &chan->conn_state) &&
5680 			    chan->unacked_frames)
5681 				__set_retrans_timer(chan);
5682 
5683 			l2cap_ertm_send(chan);
5684 		}
5685 		break;
5686 	case L2CAP_EV_RECV_RNR:
5687 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5688 		l2cap_pass_to_tx(chan, control);
5689 		if (control && control->poll) {
5690 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
5691 			l2cap_send_rr_or_rnr(chan, 0);
5692 		}
5693 		__clear_retrans_timer(chan);
5694 		l2cap_seq_list_clear(&chan->retrans_list);
5695 		break;
5696 	case L2CAP_EV_RECV_REJ:
5697 		l2cap_handle_rej(chan, control);
5698 		break;
5699 	case L2CAP_EV_RECV_SREJ:
5700 		l2cap_handle_srej(chan, control);
5701 		break;
5702 	default:
5703 		break;
5704 	}
5705 
5706 	if (skb && !skb_in_use) {
5707 		BT_DBG("Freeing %p", skb);
5708 		kfree_skb(skb);
5709 	}
5710 
5711 	return err;
5712 }
5713 
5714 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
5715 				    struct l2cap_ctrl *control,
5716 				    struct sk_buff *skb, u8 event)
5717 {
5718 	int err = 0;
5719 	u16 txseq = control->txseq;
5720 	bool skb_in_use = false;
5721 
5722 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5723 	       event);
5724 
5725 	switch (event) {
5726 	case L2CAP_EV_RECV_IFRAME:
5727 		switch (l2cap_classify_txseq(chan, txseq)) {
5728 		case L2CAP_TXSEQ_EXPECTED:
5729 			/* Keep frame for reassembly later */
5730 			l2cap_pass_to_tx(chan, control);
5731 			skb_queue_tail(&chan->srej_q, skb);
5732 			skb_in_use = true;
5733 			BT_DBG("Queued %p (queue len %d)", skb,
5734 			       skb_queue_len(&chan->srej_q));
5735 
5736 			chan->expected_tx_seq = __next_seq(chan, txseq);
5737 			break;
5738 		case L2CAP_TXSEQ_EXPECTED_SREJ:
5739 			l2cap_seq_list_pop(&chan->srej_list);
5740 
5741 			l2cap_pass_to_tx(chan, control);
5742 			skb_queue_tail(&chan->srej_q, skb);
5743 			skb_in_use = true;
5744 			BT_DBG("Queued %p (queue len %d)", skb,
5745 			       skb_queue_len(&chan->srej_q));
5746 
5747 			err = l2cap_rx_queued_iframes(chan);
5748 			if (err)
5749 				break;
5750 
5751 			break;
5752 		case L2CAP_TXSEQ_UNEXPECTED:
5753 			/* Got a frame that can't be reassembled yet.
5754 			 * Save it for later, and send SREJs to cover
5755 			 * the missing frames.
5756 			 */
5757 			skb_queue_tail(&chan->srej_q, skb);
5758 			skb_in_use = true;
5759 			BT_DBG("Queued %p (queue len %d)", skb,
5760 			       skb_queue_len(&chan->srej_q));
5761 
5762 			l2cap_pass_to_tx(chan, control);
5763 			l2cap_send_srej(chan, control->txseq);
5764 			break;
5765 		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
5766 			/* This frame was requested with an SREJ, but
5767 			 * some expected retransmitted frames are
5768 			 * missing.  Request retransmission of missing
5769 			 * SREJ'd frames.
5770 			 */
5771 			skb_queue_tail(&chan->srej_q, skb);
5772 			skb_in_use = true;
5773 			BT_DBG("Queued %p (queue len %d)", skb,
5774 			       skb_queue_len(&chan->srej_q));
5775 
5776 			l2cap_pass_to_tx(chan, control);
5777 			l2cap_send_srej_list(chan, control->txseq);
5778 			break;
5779 		case L2CAP_TXSEQ_DUPLICATE_SREJ:
5780 			/* We've already queued this frame.  Drop this copy. */
5781 			l2cap_pass_to_tx(chan, control);
5782 			break;
5783 		case L2CAP_TXSEQ_DUPLICATE:
5784 			/* Expecting a later sequence number, so this frame
5785 			 * was already received.  Ignore it completely.
5786 			 */
5787 			break;
5788 		case L2CAP_TXSEQ_INVALID_IGNORE:
5789 			break;
5790 		case L2CAP_TXSEQ_INVALID:
5791 		default:
5792 			l2cap_send_disconn_req(chan, ECONNRESET);
5793 			break;
5794 		}
5795 		break;
5796 	case L2CAP_EV_RECV_RR:
5797 		l2cap_pass_to_tx(chan, control);
5798 		if (control->final) {
5799 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5800 
5801 			if (!test_and_clear_bit(CONN_REJ_ACT,
5802 						&chan->conn_state)) {
5803 				control->final = 0;
5804 				l2cap_retransmit_all(chan, control);
5805 			}
5806 
5807 			l2cap_ertm_send(chan);
5808 		} else if (control->poll) {
5809 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
5810 					       &chan->conn_state) &&
5811 			    chan->unacked_frames) {
5812 				__set_retrans_timer(chan);
5813 			}
5814 
5815 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
5816 			l2cap_send_srej_tail(chan);
5817 		} else {
5818 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
5819 					       &chan->conn_state) &&
5820 			    chan->unacked_frames)
5821 				__set_retrans_timer(chan);
5822 
5823 			l2cap_send_ack(chan);
5824 		}
5825 		break;
5826 	case L2CAP_EV_RECV_RNR:
5827 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5828 		l2cap_pass_to_tx(chan, control);
5829 		if (control->poll) {
5830 			l2cap_send_srej_tail(chan);
5831 		} else {
5832 			struct l2cap_ctrl rr_control;
5833 			memset(&rr_control, 0, sizeof(rr_control));
5834 			rr_control.sframe = 1;
5835 			rr_control.super = L2CAP_SUPER_RR;
5836 			rr_control.reqseq = chan->buffer_seq;
5837 			l2cap_send_sframe(chan, &rr_control);
5838 		}
5839 
5840 		break;
5841 	case L2CAP_EV_RECV_REJ:
5842 		l2cap_handle_rej(chan, control);
5843 		break;
5844 	case L2CAP_EV_RECV_SREJ:
5845 		l2cap_handle_srej(chan, control);
5846 		break;
5847 	}
5848 
5849 	if (skb && !skb_in_use) {
5850 		BT_DBG("Freeing %p", skb);
5851 		kfree_skb(skb);
5852 	}
5853 
5854 	return err;
5855 }
5856 
5857 static int l2cap_finish_move(struct l2cap_chan *chan)
5858 {
5859 	BT_DBG("chan %p", chan);
5860 
5861 	chan->rx_state = L2CAP_RX_STATE_RECV;
5862 
5863 	if (chan->hs_hcon)
5864 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
5865 	else
5866 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
5867 
5868 	return l2cap_resegment(chan);
5869 }
5870 
5871 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
5872 				 struct l2cap_ctrl *control,
5873 				 struct sk_buff *skb, u8 event)
5874 {
5875 	int err;
5876 
5877 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
5878 	       event);
5879 
5880 	if (!control->poll)
5881 		return -EPROTO;
5882 
5883 	l2cap_process_reqseq(chan, control->reqseq);
5884 
5885 	if (!skb_queue_empty(&chan->tx_q))
5886 		chan->tx_send_head = skb_peek(&chan->tx_q);
5887 	else
5888 		chan->tx_send_head = NULL;
5889 
5890 	/* Rewind next_tx_seq to the point expected
5891 	 * by the receiver.
5892 	 */
5893 	chan->next_tx_seq = control->reqseq;
5894 	chan->unacked_frames = 0;
5895 
5896 	err = l2cap_finish_move(chan);
5897 	if (err)
5898 		return err;
5899 
5900 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
5901 	l2cap_send_i_or_rr_or_rnr(chan);
5902 
5903 	if (event == L2CAP_EV_RECV_IFRAME)
5904 		return -EPROTO;
5905 
5906 	return l2cap_rx_state_recv(chan, control, NULL, event);
5907 }
5908 
5909 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
5910 				 struct l2cap_ctrl *control,
5911 				 struct sk_buff *skb, u8 event)
5912 {
5913 	int err;
5914 
5915 	if (!control->final)
5916 		return -EPROTO;
5917 
5918 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5919 
5920 	chan->rx_state = L2CAP_RX_STATE_RECV;
5921 	l2cap_process_reqseq(chan, control->reqseq);
5922 
5923 	if (!skb_queue_empty(&chan->tx_q))
5924 		chan->tx_send_head = skb_peek(&chan->tx_q);
5925 	else
5926 		chan->tx_send_head = NULL;
5927 
5928 	/* Rewind next_tx_seq to the point expected
5929 	 * by the receiver.
5930 	 */
5931 	chan->next_tx_seq = control->reqseq;
5932 	chan->unacked_frames = 0;
5933 
5934 	if (chan->hs_hcon)
5935 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
5936 	else
5937 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
5938 
5939 	err = l2cap_resegment(chan);
5940 
5941 	if (!err)
5942 		err = l2cap_rx_state_recv(chan, control, skb, event);
5943 
5944 	return err;
5945 }
5946 
5947 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
5948 {
5949 	/* Make sure reqseq is for a packet that has been sent but not acked */
5950 	u16 unacked;
5951 
5952 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
5953 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
5954 }
5955 
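/* Top-level ERTM receive state machine: validate the acknowledgement
 * (reqseq) first, then dispatch the frame to the handler for the
 * current rx_state.  An invalid reqseq tears the channel down.
 */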
5956 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5957 		    struct sk_buff *skb, u8 event)
5958 {
5959 	int err = 0;
5960 
5961 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
5962 	       control, skb, event, chan->rx_state);
5963 
5964 	if (__valid_reqseq(chan, control->reqseq)) {
5965 		switch (chan->rx_state) {
5966 		case L2CAP_RX_STATE_RECV:
5967 			err = l2cap_rx_state_recv(chan, control, skb, event);
5968 			break;
5969 		case L2CAP_RX_STATE_SREJ_SENT:
5970 			err = l2cap_rx_state_srej_sent(chan, control, skb,
5971 						       event);
5972 			break;
5973 		case L2CAP_RX_STATE_WAIT_P:
5974 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
5975 			break;
5976 		case L2CAP_RX_STATE_WAIT_F:
5977 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
5978 			break;
5979 		default:
5980 			/* shut it down */
5981 			break;
5982 		}
5983 	} else {
5984 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
5985 		       control->reqseq, chan->next_tx_seq,
5986 		       chan->expected_ack_seq);
5987 		l2cap_send_disconn_req(chan, ECONNRESET);
5988 	}
5989 
5990 	return err;
5991 }
5992 
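/* Streaming mode reception: in-sequence I-frames are reassembled into
 * SDUs; anything out of sequence is dropped together with any partial
 * SDU, since streaming mode never retransmits.
 */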
5993 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5994 			   struct sk_buff *skb)
5995 {
5996 	int err = 0;
5997 
5998 	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5999 	       chan->rx_state);
6000 
6001 	if (l2cap_classify_txseq(chan, control->txseq) ==
6002 	    L2CAP_TXSEQ_EXPECTED) {
6003 		l2cap_pass_to_tx(chan, control);
6004 
6005 		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6006 		       __next_seq(chan, chan->buffer_seq));
6007 
6008 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6009 
6010 		l2cap_reassemble_sdu(chan, skb, control);
6011 	} else {
6012 		if (chan->sdu) {
6013 			kfree_skb(chan->sdu);
6014 			chan->sdu = NULL;
6015 		}
6016 		chan->sdu_last_frag = NULL;
6017 		chan->sdu_len = 0;
6018 
6019 		if (skb) {
6020 			BT_DBG("Freeing %p", skb);
6021 			kfree_skb(skb);
6022 		}
6023 	}
6024 
6025 	chan->last_acked_seq = control->txseq;
6026 	chan->expected_tx_seq = __next_seq(chan, control->txseq);
6027 
6028 	return err;
6029 }
6030 
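/* Common entry point for ERTM and streaming mode data: unpack the
 * control field, verify the FCS and PDU size against the MPS, then
 * feed I-frames and S-frames into the appropriate receive path.
 */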
6031 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6032 {
6033 	struct l2cap_ctrl *control = &bt_cb(skb)->control;
6034 	u16 len;
6035 	u8 event;
6036 
6037 	__unpack_control(chan, skb);
6038 
6039 	len = skb->len;
6040 
6041 	/*
6042 	 * We can just drop the corrupted I-frame here.
6043 	 * The receive state machine will notice the gap and start the
6044 	 * proper recovery procedure, asking for retransmission.
6045 	 */
6046 	if (l2cap_check_fcs(chan, skb))
6047 		goto drop;
6048 
6049 	if (!control->sframe && control->sar == L2CAP_SAR_START)
6050 		len -= L2CAP_SDULEN_SIZE;
6051 
6052 	if (chan->fcs == L2CAP_FCS_CRC16)
6053 		len -= L2CAP_FCS_SIZE;
6054 
6055 	if (len > chan->mps) {
6056 		l2cap_send_disconn_req(chan, ECONNRESET);
6057 		goto drop;
6058 	}
6059 
6060 	if (!control->sframe) {
6061 		int err;
6062 
6063 		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6064 		       control->sar, control->reqseq, control->final,
6065 		       control->txseq);
6066 
6067 		/* Validate F-bit - F=0 always valid, F=1 only
6068 		 * valid in TX WAIT_F
6069 		 */
6070 		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6071 			goto drop;
6072 
6073 		if (chan->mode != L2CAP_MODE_STREAMING) {
6074 			event = L2CAP_EV_RECV_IFRAME;
6075 			err = l2cap_rx(chan, control, skb, event);
6076 		} else {
6077 			err = l2cap_stream_rx(chan, control, skb);
6078 		}
6079 
6080 		if (err)
6081 			l2cap_send_disconn_req(chan, ECONNRESET);
6082 	} else {
6083 		const u8 rx_func_to_event[4] = {
6084 			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6085 			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6086 		};
6087 
6088 		/* Only I-frames are expected in streaming mode */
6089 		if (chan->mode == L2CAP_MODE_STREAMING)
6090 			goto drop;
6091 
6092 		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6093 		       control->reqseq, control->final, control->poll,
6094 		       control->super);
6095 
6096 		if (len != 0) {
6097 			BT_ERR("Trailing bytes: %d in sframe", len);
6098 			l2cap_send_disconn_req(chan, ECONNRESET);
6099 			goto drop;
6100 		}
6101 
6102 		/* Validate F and P bits */
6103 		if (control->final && (control->poll ||
6104 				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6105 			goto drop;
6106 
6107 		event = rx_func_to_event[control->super];
6108 		if (l2cap_rx(chan, control, skb, event))
6109 			l2cap_send_disconn_req(chan, ECONNRESET);
6110 	}
6111 
6112 	return 0;
6113 
6114 drop:
6115 	kfree_skb(skb);
6116 	return 0;
6117 }
6118 
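/* Deliver a frame received on a dynamically allocated CID (or the
 * A2MP fixed channel) to the owning channel, according to its mode.
 */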
6119 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6120 			       struct sk_buff *skb)
6121 {
6122 	struct l2cap_chan *chan;
6123 
6124 	chan = l2cap_get_chan_by_scid(conn, cid);
6125 	if (!chan) {
6126 		if (cid == L2CAP_CID_A2MP) {
6127 			chan = a2mp_channel_create(conn, skb);
6128 			if (!chan) {
6129 				kfree_skb(skb);
6130 				return;
6131 			}
6132 
6133 			l2cap_chan_lock(chan);
6134 		} else {
6135 			BT_DBG("unknown cid 0x%4.4x", cid);
6136 			/* Drop packet and return */
6137 			kfree_skb(skb);
6138 			return;
6139 		}
6140 	}
6141 
6142 	BT_DBG("chan %p, len %d", chan, skb->len);
6143 
6144 	if (chan->state != BT_CONNECTED)
6145 		goto drop;
6146 
6147 	switch (chan->mode) {
6148 	case L2CAP_MODE_BASIC:
6149 		/* If the socket receive buffer overflows we drop data here,
6150 		 * which is *bad* because L2CAP has to be reliable.
6151 		 * But we don't have any other choice: basic mode L2CAP
6152 		 * doesn't provide a flow control mechanism. */
6153 
6154 		if (chan->imtu < skb->len)
6155 			goto drop;
6156 
6157 		if (!chan->ops->recv(chan, skb))
6158 			goto done;
6159 		break;
6160 
6161 	case L2CAP_MODE_ERTM:
6162 	case L2CAP_MODE_STREAMING:
6163 		l2cap_data_rcv(chan, skb);
6164 		goto done;
6165 
6166 	default:
6167 		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6168 		break;
6169 	}
6170 
6171 drop:
6172 	kfree_skb(skb);
6173 
6174 done:
6175 	l2cap_chan_unlock(chan);
6176 }
6177 
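/* Connectionless channel reception: route the frame to a channel
 * bound to the given PSM, if any, otherwise drop it.
 */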
6178 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6179 				  struct sk_buff *skb)
6180 {
6181 	struct l2cap_chan *chan;
6182 
6183 	chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
6184 	if (!chan)
6185 		goto drop;
6186 
6187 	BT_DBG("chan %p, len %d", chan, skb->len);
6188 
6189 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6190 		goto drop;
6191 
6192 	if (chan->imtu < skb->len)
6193 		goto drop;
6194 
6195 	if (!chan->ops->recv(chan, skb))
6196 		return;
6197 
6198 drop:
6199 	kfree_skb(skb);
6200 }
6201 
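/* LE data (ATT) fixed channel reception: hand the frame to the
 * channel registered for this CID, if one exists.
 */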
6202 static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
6203 			      struct sk_buff *skb)
6204 {
6205 	struct l2cap_chan *chan;
6206 
6207 	chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
6208 	if (!chan)
6209 		goto drop;
6210 
6211 	BT_DBG("chan %p, len %d", chan, skb->len);
6212 
6213 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6214 		goto drop;
6215 
6216 	if (chan->imtu < skb->len)
6217 		goto drop;
6218 
6219 	if (!chan->ops->recv(chan, skb))
6220 		return;
6221 
6222 drop:
6223 	kfree_skb(skb);
6224 }
6225 
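/* Demultiplex a complete L2CAP frame: strip the basic header and
 * dispatch by CID to the signalling, connectionless, ATT, SMP or
 * data channel handlers.
 */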
6226 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6227 {
6228 	struct l2cap_hdr *lh = (void *) skb->data;
6229 	u16 cid, len;
6230 	__le16 psm;
6231 
6232 	skb_pull(skb, L2CAP_HDR_SIZE);
6233 	cid = __le16_to_cpu(lh->cid);
6234 	len = __le16_to_cpu(lh->len);
6235 
6236 	if (len != skb->len) {
6237 		kfree_skb(skb);
6238 		return;
6239 	}
6240 
6241 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
6242 
6243 	switch (cid) {
6244 	case L2CAP_CID_LE_SIGNALING:
6245 	case L2CAP_CID_SIGNALING:
6246 		l2cap_sig_channel(conn, skb);
6247 		break;
6248 
6249 	case L2CAP_CID_CONN_LESS:
6250 		psm = get_unaligned((__le16 *) skb->data);
6251 		skb_pull(skb, L2CAP_PSMLEN_SIZE);
6252 		l2cap_conless_channel(conn, psm, skb);
6253 		break;
6254 
6255 	case L2CAP_CID_LE_DATA:
6256 		l2cap_att_channel(conn, cid, skb);
6257 		break;
6258 
6259 	case L2CAP_CID_SMP:
6260 		if (smp_sig_channel(conn, skb))
6261 			l2cap_conn_del(conn->hcon, EACCES);
6262 		break;
6263 
6264 	default:
6265 		l2cap_data_channel(conn, cid, skb);
6266 		break;
6267 	}
6268 }
6269 
6270 /* ---- L2CAP interface with lower layer (HCI) ---- */
6271 
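/* Called by the HCI layer for an incoming ACL connection request:
 * scan the listening channels and report whether the connection
 * should be accepted and with which link mode.
 */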
6272 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
6273 {
6274 	int exact = 0, lm1 = 0, lm2 = 0;
6275 	struct l2cap_chan *c;
6276 
6277 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
6278 
6279 	/* Find listening sockets and check their link_mode */
6280 	read_lock(&chan_list_lock);
6281 	list_for_each_entry(c, &chan_list, global_l) {
6282 		struct sock *sk = c->sk;
6283 
6284 		if (c->state != BT_LISTEN)
6285 			continue;
6286 
6287 		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
6288 			lm1 |= HCI_LM_ACCEPT;
6289 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6290 				lm1 |= HCI_LM_MASTER;
6291 			exact++;
6292 		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
6293 			lm2 |= HCI_LM_ACCEPT;
6294 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
6295 				lm2 |= HCI_LM_MASTER;
6296 		}
6297 	}
6298 	read_unlock(&chan_list_lock);
6299 
6300 	return exact ? lm1 : lm2;
6301 }
6302 
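/* Called by the HCI layer when an ACL connection attempt completes:
 * set up the L2CAP connection on success, otherwise tear down
 * anything associated with the failed link.
 */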
6303 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
6304 {
6305 	struct l2cap_conn *conn;
6306 
6307 	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
6308 
6309 	if (!status) {
6310 		conn = l2cap_conn_add(hcon, status);
6311 		if (conn)
6312 			l2cap_conn_ready(conn);
6313 	} else {
6314 		l2cap_conn_del(hcon, bt_to_errno(status));
6315 	}
6316 }
6317 
6318 int l2cap_disconn_ind(struct hci_conn *hcon)
6319 {
6320 	struct l2cap_conn *conn = hcon->l2cap_data;
6321 
6322 	BT_DBG("hcon %p", hcon);
6323 
6324 	if (!conn)
6325 		return HCI_ERROR_REMOTE_USER_TERM;
6326 	return conn->disc_reason;
6327 }
6328 
6329 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
6330 {
6331 	BT_DBG("hcon %p reason %d", hcon, reason);
6332 
6333 	l2cap_conn_del(hcon, bt_to_errno(reason));
6334 }
6335 
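/* React to an encryption change on a connection-oriented channel:
 * losing encryption starts a timer on a medium security channel and
 * closes a high security channel immediately.
 */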
6336 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
6337 {
6338 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
6339 		return;
6340 
6341 	if (encrypt == 0x00) {
6342 		if (chan->sec_level == BT_SECURITY_MEDIUM)
6343 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
6344 		else if (chan->sec_level == BT_SECURITY_HIGH)
6345 			l2cap_chan_close(chan, ECONNREFUSED);
6346 	} else {
6347 		if (chan->sec_level == BT_SECURITY_MEDIUM)
6348 			__clear_chan_timer(chan);
6349 	}
6350 }
6351 
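/* Called by the HCI layer when authentication/encryption completes:
 * walk all channels on the connection and either mark them ready,
 * resume a pending connect, answer a held connection request or
 * schedule their teardown, depending on state and status.
 */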
6352 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
6353 {
6354 	struct l2cap_conn *conn = hcon->l2cap_data;
6355 	struct l2cap_chan *chan;
6356 
6357 	if (!conn)
6358 		return 0;
6359 
6360 	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
6361 
6362 	if (hcon->type == LE_LINK) {
6363 		if (!status && encrypt)
6364 			smp_distribute_keys(conn, 0);
6365 		cancel_delayed_work(&conn->security_timer);
6366 	}
6367 
6368 	mutex_lock(&conn->chan_lock);
6369 
6370 	list_for_each_entry(chan, &conn->chan_l, list) {
6371 		l2cap_chan_lock(chan);
6372 
6373 		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
6374 		       state_to_string(chan->state));
6375 
6376 		if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
6377 			l2cap_chan_unlock(chan);
6378 			continue;
6379 		}
6380 
6381 		if (chan->scid == L2CAP_CID_LE_DATA) {
6382 			if (!status && encrypt) {
6383 				chan->sec_level = hcon->sec_level;
6384 				l2cap_chan_ready(chan);
6385 			}
6386 
6387 			l2cap_chan_unlock(chan);
6388 			continue;
6389 		}
6390 
6391 		if (!__l2cap_no_conn_pending(chan)) {
6392 			l2cap_chan_unlock(chan);
6393 			continue;
6394 		}
6395 
6396 		if (!status && (chan->state == BT_CONNECTED ||
6397 				chan->state == BT_CONFIG)) {
6398 			struct sock *sk = chan->sk;
6399 
6400 			clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
6401 			sk->sk_state_change(sk);
6402 
6403 			l2cap_check_encryption(chan, encrypt);
6404 			l2cap_chan_unlock(chan);
6405 			continue;
6406 		}
6407 
6408 		if (chan->state == BT_CONNECT) {
6409 			if (!status) {
6410 				l2cap_start_connection(chan);
6411 			} else {
6412 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6413 			}
6414 		} else if (chan->state == BT_CONNECT2) {
6415 			struct sock *sk = chan->sk;
6416 			struct l2cap_conn_rsp rsp;
6417 			__u16 res, stat;
6418 
6419 			lock_sock(sk);
6420 
6421 			if (!status) {
6422 				if (test_bit(BT_SK_DEFER_SETUP,
6423 					     &bt_sk(sk)->flags)) {
6424 					res = L2CAP_CR_PEND;
6425 					stat = L2CAP_CS_AUTHOR_PEND;
6426 					chan->ops->defer(chan);
6427 				} else {
6428 					__l2cap_state_change(chan, BT_CONFIG);
6429 					res = L2CAP_CR_SUCCESS;
6430 					stat = L2CAP_CS_NO_INFO;
6431 				}
6432 			} else {
6433 				__l2cap_state_change(chan, BT_DISCONN);
6434 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
6435 				res = L2CAP_CR_SEC_BLOCK;
6436 				stat = L2CAP_CS_NO_INFO;
6437 			}
6438 
6439 			release_sock(sk);
6440 
6441 			rsp.scid   = cpu_to_le16(chan->dcid);
6442 			rsp.dcid   = cpu_to_le16(chan->scid);
6443 			rsp.result = cpu_to_le16(res);
6444 			rsp.status = cpu_to_le16(stat);
6445 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
6446 				       sizeof(rsp), &rsp);
6447 
6448 			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
6449 			    res == L2CAP_CR_SUCCESS) {
6450 				char buf[128];
6451 				set_bit(CONF_REQ_SENT, &chan->conf_state);
6452 				l2cap_send_cmd(conn, l2cap_get_ident(conn),
6453 					       L2CAP_CONF_REQ,
6454 					       l2cap_build_conf_req(chan, buf),
6455 					       buf);
6456 				chan->num_conf_req++;
6457 			}
6458 		}
6459 
6460 		l2cap_chan_unlock(chan);
6461 	}
6462 
6463 	mutex_unlock(&conn->chan_lock);
6464 
6465 	return 0;
6466 }
6467 
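/* Receive ACL data from the HCI layer: reassemble fragmented L2CAP
 * frames in conn->rx_skb until the length from the basic header is
 * reached, then hand the complete frame to l2cap_recv_frame().
 */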
6468 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
6469 {
6470 	struct l2cap_conn *conn = hcon->l2cap_data;
6471 	struct l2cap_hdr *hdr;
6472 	int len;
6473 
6474 	/* For an AMP controller, do not create an l2cap conn */
6475 	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
6476 		goto drop;
6477 
6478 	if (!conn)
6479 		conn = l2cap_conn_add(hcon, 0);
6480 
6481 	if (!conn)
6482 		goto drop;
6483 
6484 	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
6485 
6486 	switch (flags) {
6487 	case ACL_START:
6488 	case ACL_START_NO_FLUSH:
6489 	case ACL_COMPLETE:
6490 		if (conn->rx_len) {
6491 			BT_ERR("Unexpected start frame (len %d)", skb->len);
6492 			kfree_skb(conn->rx_skb);
6493 			conn->rx_skb = NULL;
6494 			conn->rx_len = 0;
6495 			l2cap_conn_unreliable(conn, ECOMM);
6496 		}
6497 
6498 		/* A start fragment always begins with the Basic L2CAP header */
6499 		if (skb->len < L2CAP_HDR_SIZE) {
6500 			BT_ERR("Frame is too short (len %d)", skb->len);
6501 			l2cap_conn_unreliable(conn, ECOMM);
6502 			goto drop;
6503 		}
6504 
6505 		hdr = (struct l2cap_hdr *) skb->data;
6506 		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
6507 
6508 		if (len == skb->len) {
6509 			/* Complete frame received */
6510 			l2cap_recv_frame(conn, skb);
6511 			return 0;
6512 		}
6513 
6514 		BT_DBG("Start: total len %d, frag len %d", len, skb->len);
6515 
6516 		if (skb->len > len) {
6517 			BT_ERR("Frame is too long (len %d, expected len %d)",
6518 			       skb->len, len);
6519 			l2cap_conn_unreliable(conn, ECOMM);
6520 			goto drop;
6521 		}
6522 
6523 		/* Allocate skb for the complete frame (with header) */
6524 		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
6525 		if (!conn->rx_skb)
6526 			goto drop;
6527 
6528 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6529 					  skb->len);
6530 		conn->rx_len = len - skb->len;
6531 		break;
6532 
6533 	case ACL_CONT:
6534 		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
6535 
6536 		if (!conn->rx_len) {
6537 			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
6538 			l2cap_conn_unreliable(conn, ECOMM);
6539 			goto drop;
6540 		}
6541 
6542 		if (skb->len > conn->rx_len) {
6543 			BT_ERR("Fragment is too long (len %d, expected %d)",
6544 			       skb->len, conn->rx_len);
6545 			kfree_skb(conn->rx_skb);
6546 			conn->rx_skb = NULL;
6547 			conn->rx_len = 0;
6548 			l2cap_conn_unreliable(conn, ECOMM);
6549 			goto drop;
6550 		}
6551 
6552 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
6553 					  skb->len);
6554 		conn->rx_len -= skb->len;
6555 
6556 		if (!conn->rx_len) {
6557 			/* Complete frame received */
6558 			l2cap_recv_frame(conn, conn->rx_skb);
6559 			conn->rx_skb = NULL;
6560 		}
6561 		break;
6562 	}
6563 
6564 drop:
6565 	kfree_skb(skb);
6566 	return 0;
6567 }
6568 
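/* debugfs: dump one line per known channel with its addresses, PSM,
 * CIDs, MTUs, security level and mode.
 */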
6569 static int l2cap_debugfs_show(struct seq_file *f, void *p)
6570 {
6571 	struct l2cap_chan *c;
6572 
6573 	read_lock(&chan_list_lock);
6574 
6575 	list_for_each_entry(c, &chan_list, global_l) {
6576 		struct sock *sk = c->sk;
6577 
6578 		seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6579 			   &bt_sk(sk)->src, &bt_sk(sk)->dst,
6580 			   c->state, __le16_to_cpu(c->psm),
6581 			   c->scid, c->dcid, c->imtu, c->omtu,
6582 			   c->sec_level, c->mode);
6583 	}
6584 
6585 	read_unlock(&chan_list_lock);
6586 
6587 	return 0;
6588 }
6589 
6590 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
6591 {
6592 	return single_open(file, l2cap_debugfs_show, inode->i_private);
6593 }
6594 
6595 static const struct file_operations l2cap_debugfs_fops = {
6596 	.open		= l2cap_debugfs_open,
6597 	.read		= seq_read,
6598 	.llseek		= seq_lseek,
6599 	.release	= single_release,
6600 };
6601 
6602 static struct dentry *l2cap_debugfs;
6603 
6604 int __init l2cap_init(void)
6605 {
6606 	int err;
6607 
6608 	err = l2cap_init_sockets();
6609 	if (err < 0)
6610 		return err;
6611 
6612 	if (bt_debugfs) {
6613 		l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
6614 						    NULL, &l2cap_debugfs_fops);
6615 		if (!l2cap_debugfs)
6616 			BT_ERR("Failed to create L2CAP debug file");
6617 	}
6618 
6619 	return 0;
6620 }
6621 
6622 void l2cap_exit(void)
6623 {
6624 	debugfs_remove(l2cap_debugfs);
6625 	l2cap_cleanup_sockets();
6626 }
6627 
6628 module_param(disable_ertm, bool, 0644);
6629 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
6630