xref: /linux/net/bluetooth/mgmt.c (revision 7ad24ea4bf620a32631d7b3069c3e30c078b0c3e)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33 
34 #include "smp.h"
35 
36 #define MGMT_VERSION	1
37 #define MGMT_REVISION	5
38 
/* Commands accepted by the mgmt interface.  This list is reported back
 * verbatim to user-space via MGMT_OP_READ_COMMANDS.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_PAIRABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
};
87 
/* Events the mgmt interface may emit.  This list is reported back
 * verbatim to user-space via MGMT_OP_READ_COMMANDS.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
};
113 
114 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
115 
/* A controller counts as powered only when the HCI transport is up and
 * the automatic power-off (HCI_AUTO_OFF) phase has completed.  The
 * argument is parenthesized so the macro stays correct when the caller
 * passes a non-trivial expression.
 */
#define hdev_is_powered(hdev) (test_bit(HCI_UP, &(hdev)->flags) && \
				!test_bit(HCI_AUTO_OFF, &(hdev)->dev_flags))
118 
/* Tracks a mgmt command that is still waiting for the controller to
 * finish.  One instance sits on hdev->mgmt_pending from the moment the
 * command is accepted until the matching complete/status reply is sent.
 */
struct pending_cmd {
	struct list_head list;	/* membership in hdev->mgmt_pending */
	u16 opcode;		/* MGMT_OP_* this entry belongs to */
	int index;		/* controller index (hdev->id) */
	void *param;		/* copy of the command parameters */
	struct sock *sk;	/* originating socket; a reference is held */
	void *user_data;	/* per-command private state */
};
127 
/* HCI to MGMT error code conversion table, indexed directly by the HCI
 * status code.  Entry order therefore mirrors the HCI status values and
 * must not be rearranged; out-of-range codes are handled by
 * mgmt_status() below.
 */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
192 
193 static u8 mgmt_status(u8 hci_status)
194 {
195 	if (hci_status < ARRAY_SIZE(mgmt_status_table))
196 		return mgmt_status_table[hci_status];
197 
198 	return MGMT_STATUS_FAILED;
199 }
200 
201 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
202 {
203 	struct sk_buff *skb;
204 	struct mgmt_hdr *hdr;
205 	struct mgmt_ev_cmd_status *ev;
206 	int err;
207 
208 	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
209 
210 	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
211 	if (!skb)
212 		return -ENOMEM;
213 
214 	hdr = (void *) skb_put(skb, sizeof(*hdr));
215 
216 	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
217 	hdr->index = cpu_to_le16(index);
218 	hdr->len = cpu_to_le16(sizeof(*ev));
219 
220 	ev = (void *) skb_put(skb, sizeof(*ev));
221 	ev->status = status;
222 	ev->opcode = cpu_to_le16(cmd);
223 
224 	err = sock_queue_rcv_skb(sk, skb);
225 	if (err < 0)
226 		kfree_skb(skb);
227 
228 	return err;
229 }
230 
231 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
232 			void *rp, size_t rp_len)
233 {
234 	struct sk_buff *skb;
235 	struct mgmt_hdr *hdr;
236 	struct mgmt_ev_cmd_complete *ev;
237 	int err;
238 
239 	BT_DBG("sock %p", sk);
240 
241 	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
242 	if (!skb)
243 		return -ENOMEM;
244 
245 	hdr = (void *) skb_put(skb, sizeof(*hdr));
246 
247 	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
248 	hdr->index = cpu_to_le16(index);
249 	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
250 
251 	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
252 	ev->opcode = cpu_to_le16(cmd);
253 	ev->status = status;
254 
255 	if (rp)
256 		memcpy(ev->data, rp, rp_len);
257 
258 	err = sock_queue_rcv_skb(sk, skb);
259 	if (err < 0)
260 		kfree_skb(skb);
261 
262 	return err;
263 }
264 
265 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
266 			u16 data_len)
267 {
268 	struct mgmt_rp_read_version rp;
269 
270 	BT_DBG("sock %p", sk);
271 
272 	rp.version = MGMT_VERSION;
273 	rp.revision = cpu_to_le16(MGMT_REVISION);
274 
275 	return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
276 			    sizeof(rp));
277 }
278 
279 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
280 			 u16 data_len)
281 {
282 	struct mgmt_rp_read_commands *rp;
283 	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
284 	const u16 num_events = ARRAY_SIZE(mgmt_events);
285 	__le16 *opcode;
286 	size_t rp_size;
287 	int i, err;
288 
289 	BT_DBG("sock %p", sk);
290 
291 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
292 
293 	rp = kmalloc(rp_size, GFP_KERNEL);
294 	if (!rp)
295 		return -ENOMEM;
296 
297 	rp->num_commands = cpu_to_le16(num_commands);
298 	rp->num_events = cpu_to_le16(num_events);
299 
300 	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
301 		put_unaligned_le16(mgmt_commands[i], opcode);
302 
303 	for (i = 0; i < num_events; i++, opcode++)
304 		put_unaligned_le16(mgmt_events[i], opcode);
305 
306 	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
307 			   rp_size);
308 	kfree(rp);
309 
310 	return err;
311 }
312 
/* MGMT_OP_READ_INDEX_LIST handler: report the registered BR/EDR
 * controller indices.  Controllers still in setup or claimed by a user
 * channel are filtered out of the reply.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the allocation.  This deliberately
	 * counts every BR/EDR device; the second pass may emit fewer
	 * entries, which is why rp_len is recomputed afterwards.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR)
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC because hci_dev_list_lock is held */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indices that are actually visible */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags))
			continue;

		if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		if (d->dev_type == HCI_BREDR) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}
365 
366 static u32 get_supported_settings(struct hci_dev *hdev)
367 {
368 	u32 settings = 0;
369 
370 	settings |= MGMT_SETTING_POWERED;
371 	settings |= MGMT_SETTING_PAIRABLE;
372 	settings |= MGMT_SETTING_DEBUG_KEYS;
373 
374 	if (lmp_bredr_capable(hdev)) {
375 		settings |= MGMT_SETTING_CONNECTABLE;
376 		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
377 			settings |= MGMT_SETTING_FAST_CONNECTABLE;
378 		settings |= MGMT_SETTING_DISCOVERABLE;
379 		settings |= MGMT_SETTING_BREDR;
380 		settings |= MGMT_SETTING_LINK_SECURITY;
381 
382 		if (lmp_ssp_capable(hdev)) {
383 			settings |= MGMT_SETTING_SSP;
384 			settings |= MGMT_SETTING_HS;
385 		}
386 
387 		if (lmp_sc_capable(hdev) ||
388 		    test_bit(HCI_FORCE_SC, &hdev->dev_flags))
389 			settings |= MGMT_SETTING_SECURE_CONN;
390 	}
391 
392 	if (lmp_le_capable(hdev)) {
393 		settings |= MGMT_SETTING_LE;
394 		settings |= MGMT_SETTING_ADVERTISING;
395 		settings |= MGMT_SETTING_PRIVACY;
396 	}
397 
398 	return settings;
399 }
400 
401 static u32 get_current_settings(struct hci_dev *hdev)
402 {
403 	u32 settings = 0;
404 
405 	if (hdev_is_powered(hdev))
406 		settings |= MGMT_SETTING_POWERED;
407 
408 	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
409 		settings |= MGMT_SETTING_CONNECTABLE;
410 
411 	if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
412 		settings |= MGMT_SETTING_FAST_CONNECTABLE;
413 
414 	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
415 		settings |= MGMT_SETTING_DISCOVERABLE;
416 
417 	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
418 		settings |= MGMT_SETTING_PAIRABLE;
419 
420 	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
421 		settings |= MGMT_SETTING_BREDR;
422 
423 	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
424 		settings |= MGMT_SETTING_LE;
425 
426 	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
427 		settings |= MGMT_SETTING_LINK_SECURITY;
428 
429 	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
430 		settings |= MGMT_SETTING_SSP;
431 
432 	if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
433 		settings |= MGMT_SETTING_HS;
434 
435 	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
436 		settings |= MGMT_SETTING_ADVERTISING;
437 
438 	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
439 		settings |= MGMT_SETTING_SECURE_CONN;
440 
441 	if (test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags))
442 		settings |= MGMT_SETTING_DEBUG_KEYS;
443 
444 	if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
445 		settings |= MGMT_SETTING_PRIVACY;
446 
447 	return settings;
448 }
449 
450 #define PNP_INFO_SVCLASS_ID		0x1200
451 
/* Append an EIR block listing the 16-bit service UUIDs to @data, where
 * @len is the space remaining in the buffer.  Returns the new write
 * position.  If not every UUID fits, the block type is downgraded to
 * EIR_UUID16_SOME.
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need 2 header bytes plus at least one 16-bit UUID */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* The 16-bit form is read from bytes 12-13 of the stored
		 * 128-bit value.
		 */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		/* Device ID is advertised separately (see create_eir) */
		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;	/* AD length, grows per UUID */
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
493 
/* Append an EIR block listing the 32-bit service UUIDs to @data; same
 * contract as create_uuid16_list() but without any filtering.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need 2 header bytes plus at least one 32-bit UUID */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;	/* AD length, grows per UUID */
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* The 32-bit form is taken from offset 12 of the stored value */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
526 
/* Append an EIR block listing the full 128-bit service UUIDs to @data;
 * same contract as create_uuid16_list().
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need 2 header bytes plus at least one 16-byte UUID */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;	/* AD length, grows per UUID */
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
559 
560 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
561 {
562 	struct pending_cmd *cmd;
563 
564 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
565 		if (cmd->opcode == opcode)
566 			return cmd;
567 	}
568 
569 	return NULL;
570 }
571 
/* Build the LE scan response payload into @ptr (currently only the
 * local name, shortened when necessary).  Returns the number of bytes
 * written.
 */
static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* Two bytes go to the length/type header of the AD entry */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* AD length byte covers the type byte plus the name */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);	/* keeps ptr valid for future fields */
	}

	return ad_len;
}
597 
598 static void update_scan_rsp_data(struct hci_request *req)
599 {
600 	struct hci_dev *hdev = req->hdev;
601 	struct hci_cp_le_set_scan_rsp_data cp;
602 	u8 len;
603 
604 	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
605 		return;
606 
607 	memset(&cp, 0, sizeof(cp));
608 
609 	len = create_scan_rsp_data(hdev, cp.data);
610 
611 	if (hdev->scan_rsp_data_len == len &&
612 	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
613 		return;
614 
615 	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
616 	hdev->scan_rsp_data_len = len;
617 
618 	cp.length = len;
619 
620 	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
621 }
622 
623 static u8 get_adv_discov_flags(struct hci_dev *hdev)
624 {
625 	struct pending_cmd *cmd;
626 
627 	/* If there's a pending mgmt command the flags will not yet have
628 	 * their final values, so check for this first.
629 	 */
630 	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
631 	if (cmd) {
632 		struct mgmt_mode *cp = cmd->param;
633 		if (cp->val == 0x01)
634 			return LE_AD_GENERAL;
635 		else if (cp->val == 0x02)
636 			return LE_AD_LIMITED;
637 	} else {
638 		if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
639 			return LE_AD_LIMITED;
640 		else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
641 			return LE_AD_GENERAL;
642 	}
643 
644 	return 0;
645 }
646 
647 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
648 {
649 	u8 ad_len = 0, flags = 0;
650 
651 	flags |= get_adv_discov_flags(hdev);
652 
653 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
654 		flags |= LE_AD_NO_BREDR;
655 
656 	if (flags) {
657 		BT_DBG("adv flags 0x%02x", flags);
658 
659 		ptr[0] = 2;
660 		ptr[1] = EIR_FLAGS;
661 		ptr[2] = flags;
662 
663 		ad_len += 3;
664 		ptr += 3;
665 	}
666 
667 	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
668 		ptr[0] = 2;
669 		ptr[1] = EIR_TX_POWER;
670 		ptr[2] = (u8) hdev->adv_tx_power;
671 
672 		ad_len += 3;
673 		ptr += 3;
674 	}
675 
676 	return ad_len;
677 }
678 
679 static void update_adv_data(struct hci_request *req)
680 {
681 	struct hci_dev *hdev = req->hdev;
682 	struct hci_cp_le_set_adv_data cp;
683 	u8 len;
684 
685 	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
686 		return;
687 
688 	memset(&cp, 0, sizeof(cp));
689 
690 	len = create_adv_data(hdev, cp.data);
691 
692 	if (hdev->adv_data_len == len &&
693 	    memcmp(cp.data, hdev->adv_data, len) == 0)
694 		return;
695 
696 	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
697 	hdev->adv_data_len = len;
698 
699 	cp.length = len;
700 
701 	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
702 }
703 
/* Assemble the Extended Inquiry Response payload: local name, TX power,
 * Device ID record and the service UUID lists.  @data must provide
 * HCI_MAX_EIR_LENGTH bytes; update_eir() pre-zeroes the buffer.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	/* Device ID record: source, vendor, product, version */
	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* UUID lists consume whatever space remains in the buffer */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
751 
752 static void update_eir(struct hci_request *req)
753 {
754 	struct hci_dev *hdev = req->hdev;
755 	struct hci_cp_write_eir cp;
756 
757 	if (!hdev_is_powered(hdev))
758 		return;
759 
760 	if (!lmp_ext_inq_capable(hdev))
761 		return;
762 
763 	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
764 		return;
765 
766 	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
767 		return;
768 
769 	memset(&cp, 0, sizeof(cp));
770 
771 	create_eir(hdev, cp.data);
772 
773 	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
774 		return;
775 
776 	memcpy(hdev->eir, cp.data, sizeof(cp.data));
777 
778 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
779 }
780 
781 static u8 get_service_classes(struct hci_dev *hdev)
782 {
783 	struct bt_uuid *uuid;
784 	u8 val = 0;
785 
786 	list_for_each_entry(uuid, &hdev->uuids, list)
787 		val |= uuid->svc_hint;
788 
789 	return val;
790 }
791 
792 static void update_class(struct hci_request *req)
793 {
794 	struct hci_dev *hdev = req->hdev;
795 	u8 cod[3];
796 
797 	BT_DBG("%s", hdev->name);
798 
799 	if (!hdev_is_powered(hdev))
800 		return;
801 
802 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
803 		return;
804 
805 	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
806 		return;
807 
808 	cod[0] = hdev->minor_class;
809 	cod[1] = hdev->major_class;
810 	cod[2] = get_service_classes(hdev);
811 
812 	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
813 		cod[1] |= 0x20;
814 
815 	if (memcmp(cod, hdev->dev_class, 3) == 0)
816 		return;
817 
818 	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
819 }
820 
821 static bool get_connectable(struct hci_dev *hdev)
822 {
823 	struct pending_cmd *cmd;
824 
825 	/* If there's a pending mgmt command the flag will not yet have
826 	 * it's final value, so check for this first.
827 	 */
828 	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
829 	if (cmd) {
830 		struct mgmt_mode *cp = cmd->param;
831 		return cp->val;
832 	}
833 
834 	return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
835 }
836 
/* Queue HCI commands that (re)start LE advertising, deriving the
 * advertising type and own-address type from the current connectable
 * state.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	/* Clear the HCI_ADVERTISING bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_ADVERTISING, &hdev->dev_flags);

	connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(0x0800);
	cp.max_interval = cpu_to_le16(0x0800);
	/* Connectable advertising uses ADV_IND, otherwise ADV_NONCONN_IND */
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
871 
872 static void disable_advertising(struct hci_request *req)
873 {
874 	u8 enable = 0x00;
875 
876 	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
877 }
878 
879 static void service_cache_off(struct work_struct *work)
880 {
881 	struct hci_dev *hdev = container_of(work, struct hci_dev,
882 					    service_cache.work);
883 	struct hci_request req;
884 
885 	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
886 		return;
887 
888 	hci_req_init(&req, hdev);
889 
890 	hci_dev_lock(hdev);
891 
892 	update_eir(&req);
893 	update_class(&req);
894 
895 	hci_dev_unlock(hdev);
896 
897 	hci_req_run(&req, NULL);
898 }
899 
/* Delayed work run when the Resolvable Private Address times out.  If
 * advertising is active and no LE links are up, advertising is cycled
 * so that a fresh RPA gets programmed into the controller.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	/* Nothing to refresh while not advertising or while LE
	 * connections exist.
	 */
	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
	    hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */

	hci_req_init(&req, hdev);

	disable_advertising(&req);
	enable_advertising(&req);

	hci_req_run(&req, NULL);
}
925 
926 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
927 {
928 	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
929 		return;
930 
931 	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
932 	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
933 
934 	/* Non-mgmt controlled devices get this bit set
935 	 * implicitly so that pairing works for them, however
936 	 * for mgmt we require user-space to explicitly enable
937 	 * it
938 	 */
939 	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
940 }
941 
942 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
943 				void *data, u16 data_len)
944 {
945 	struct mgmt_rp_read_info rp;
946 
947 	BT_DBG("sock %p %s", sk, hdev->name);
948 
949 	hci_dev_lock(hdev);
950 
951 	memset(&rp, 0, sizeof(rp));
952 
953 	bacpy(&rp.bdaddr, &hdev->bdaddr);
954 
955 	rp.version = hdev->hci_ver;
956 	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
957 
958 	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
959 	rp.current_settings = cpu_to_le32(get_current_settings(hdev));
960 
961 	memcpy(rp.dev_class, hdev->dev_class, 3);
962 
963 	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
964 	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
965 
966 	hci_dev_unlock(hdev);
967 
968 	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
969 			    sizeof(rp));
970 }
971 
972 static void mgmt_pending_free(struct pending_cmd *cmd)
973 {
974 	sock_put(cmd->sk);
975 	kfree(cmd->param);
976 	kfree(cmd);
977 }
978 
979 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
980 					    struct hci_dev *hdev, void *data,
981 					    u16 len)
982 {
983 	struct pending_cmd *cmd;
984 
985 	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
986 	if (!cmd)
987 		return NULL;
988 
989 	cmd->opcode = opcode;
990 	cmd->index = hdev->id;
991 
992 	cmd->param = kmalloc(len, GFP_KERNEL);
993 	if (!cmd->param) {
994 		kfree(cmd);
995 		return NULL;
996 	}
997 
998 	if (data)
999 		memcpy(cmd->param, data, len);
1000 
1001 	cmd->sk = sk;
1002 	sock_hold(sk);
1003 
1004 	list_add(&cmd->list, &hdev->mgmt_pending);
1005 
1006 	return cmd;
1007 }
1008 
1009 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1010 				 void (*cb)(struct pending_cmd *cmd,
1011 					    void *data),
1012 				 void *data)
1013 {
1014 	struct pending_cmd *cmd, *tmp;
1015 
1016 	list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1017 		if (opcode > 0 && cmd->opcode != opcode)
1018 			continue;
1019 
1020 		cb(cmd, data);
1021 	}
1022 }
1023 
1024 static void mgmt_pending_remove(struct pending_cmd *cmd)
1025 {
1026 	list_del(&cmd->list);
1027 	mgmt_pending_free(cmd);
1028 }
1029 
1030 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1031 {
1032 	__le32 settings = cpu_to_le32(get_current_settings(hdev));
1033 
1034 	return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1035 			    sizeof(settings));
1036 }
1037 
1038 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
1039 {
1040 	BT_DBG("%s status 0x%02x", hdev->name, status);
1041 
1042 	if (hci_conn_count(hdev) == 0) {
1043 		cancel_delayed_work(&hdev->power_off);
1044 		queue_work(hdev->req_workqueue, &hdev->power_off.work);
1045 	}
1046 }
1047 
/* Queue HCI commands that quiesce the controller before power-off:
 * disable page/inquiry scan, stop advertising and LE scanning, and tear
 * down or cancel every connection.  Returns the hci_req_run() result;
 * -ENODATA means no HCI commands were needed.
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		disable_advertising(&req);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
		hci_req_add_le_scan_disable(&req);
	}

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			/* Established links get a proper disconnect */
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			/* Outgoing attempts are cancelled */
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			/* Incoming requests are rejected */
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	return hci_req_run(&req, clean_up_hci_complete);
}
1102 
/* MGMT_OP_SET_POWERED handler: power the controller up or down.  The
 * reply is deferred via a pending command entry unless the requested
 * state already matches the current one.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Mode must be a strict boolean */
	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one power transition may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Powering on while auto-off is pending only needs the timer
	 * cancelled; the controller itself is already up.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Requested state already in effect: answer immediately */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1168 
1169 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
1170 		      struct sock *skip_sk)
1171 {
1172 	struct sk_buff *skb;
1173 	struct mgmt_hdr *hdr;
1174 
1175 	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
1176 	if (!skb)
1177 		return -ENOMEM;
1178 
1179 	hdr = (void *) skb_put(skb, sizeof(*hdr));
1180 	hdr->opcode = cpu_to_le16(event);
1181 	if (hdev)
1182 		hdr->index = cpu_to_le16(hdev->id);
1183 	else
1184 		hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
1185 	hdr->len = cpu_to_le16(data_len);
1186 
1187 	if (data)
1188 		memcpy(skb_put(skb, data_len), data, data_len);
1189 
1190 	/* Time stamp */
1191 	__net_timestamp(skb);
1192 
1193 	hci_send_to_control(skb, skip_sk);
1194 	kfree_skb(skb);
1195 
1196 	return 0;
1197 }
1198 
1199 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1200 {
1201 	__le32 ev;
1202 
1203 	ev = cpu_to_le32(get_current_settings(hdev));
1204 
1205 	return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1206 }
1207 
/* Context passed to mgmt_pending_foreach() callbacks such as
 * settings_rsp(): @sk records the first responding socket (with a
 * reference held so the caller can notify it afterwards).
 * NOTE(review): @mgmt_status is not read in the code visible here;
 * presumably set/used by other callbacks — confirm against the rest
 * of the file.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1213 
1214 static void settings_rsp(struct pending_cmd *cmd, void *data)
1215 {
1216 	struct cmd_lookup *match = data;
1217 
1218 	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1219 
1220 	list_del(&cmd->list);
1221 
1222 	if (match->sk == NULL) {
1223 		match->sk = cmd->sk;
1224 		sock_hold(match->sk);
1225 	}
1226 
1227 	mgmt_pending_free(cmd);
1228 }
1229 
1230 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1231 {
1232 	u8 *status = data;
1233 
1234 	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1235 	mgmt_pending_remove(cmd);
1236 }
1237 
1238 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1239 {
1240 	if (!lmp_bredr_capable(hdev))
1241 		return MGMT_STATUS_NOT_SUPPORTED;
1242 	else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1243 		return MGMT_STATUS_REJECTED;
1244 	else
1245 		return MGMT_STATUS_SUCCESS;
1246 }
1247 
1248 static u8 mgmt_le_support(struct hci_dev *hdev)
1249 {
1250 	if (!lmp_le_capable(hdev))
1251 		return MGMT_STATUS_NOT_SUPPORTED;
1252 	else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1253 		return MGMT_STATUS_REJECTED;
1254 	else
1255 		return MGMT_STATUS_SUCCESS;
1256 }
1257 
/* hci_request completion callback for set_discoverable(): commit or
 * roll back the flag state depending on the HCI status and answer
 * the pending mgmt command.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the limited-discoverable bit that
		 * set_discoverable() set optimistically before
		 * running the request.
		 */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* Arm the discoverable timeout now that the mode
		 * change has actually taken effect.
		 */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1314 
/* Handle the MGMT Set Discoverable command. val may be 0x00 (off),
 * 0x01 (general discoverable) or 0x02 (limited discoverable, which
 * requires a timeout). Queues HCI commands and completes via
 * set_discoverable_complete() unless the request can be answered
 * without any HCI traffic.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Discoverable requires at least one transport to be enabled. */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while the controller is off. */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Discoverable and connectable changes share scan-enable
	 * state, so neither may already be in flight.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable implies connectable. */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		/* Only num_iac LAPs (3 bytes each) plus the count byte
		 * are actually sent to the controller.
		 */
		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1479 
1480 static void write_fast_connectable(struct hci_request *req, bool enable)
1481 {
1482 	struct hci_dev *hdev = req->hdev;
1483 	struct hci_cp_write_page_scan_activity acp;
1484 	u8 type;
1485 
1486 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1487 		return;
1488 
1489 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1490 		return;
1491 
1492 	if (enable) {
1493 		type = PAGE_SCAN_TYPE_INTERLACED;
1494 
1495 		/* 160 msec page scan interval */
1496 		acp.interval = cpu_to_le16(0x0100);
1497 	} else {
1498 		type = PAGE_SCAN_TYPE_STANDARD;	/* default */
1499 
1500 		/* default 1.28 sec page scan */
1501 		acp.interval = cpu_to_le16(0x0800);
1502 	}
1503 
1504 	acp.window = cpu_to_le16(0x0012);
1505 
1506 	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1507 	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
1508 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1509 			    sizeof(acp), &acp);
1510 
1511 	if (hdev->page_scan_type != type)
1512 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1513 }
1514 
/* hci_request completion callback for set_connectable(): update the
 * HCI_CONNECTABLE flag according to the HCI status and answer the
 * pending mgmt command.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	/* Flip the flag and note whether the setting actually changed
	 * so that a New Settings event is only sent when needed.
	 */
	cp = cmd->param;
	if (cp->val)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1552 
1553 static int set_connectable_update_settings(struct hci_dev *hdev,
1554 					   struct sock *sk, u8 val)
1555 {
1556 	bool changed = false;
1557 	int err;
1558 
1559 	if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1560 		changed = true;
1561 
1562 	if (val) {
1563 		set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1564 	} else {
1565 		clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1566 		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1567 	}
1568 
1569 	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1570 	if (err < 0)
1571 		return err;
1572 
1573 	if (changed)
1574 		return new_settings(hdev, sk);
1575 
1576 	return 0;
1577 }
1578 
1579 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1580 			   u16 len)
1581 {
1582 	struct mgmt_mode *cp = data;
1583 	struct pending_cmd *cmd;
1584 	struct hci_request req;
1585 	u8 scan;
1586 	int err;
1587 
1588 	BT_DBG("request for %s", hdev->name);
1589 
1590 	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1591 	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1592 		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1593 				  MGMT_STATUS_REJECTED);
1594 
1595 	if (cp->val != 0x00 && cp->val != 0x01)
1596 		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1597 				  MGMT_STATUS_INVALID_PARAMS);
1598 
1599 	hci_dev_lock(hdev);
1600 
1601 	if (!hdev_is_powered(hdev)) {
1602 		err = set_connectable_update_settings(hdev, sk, cp->val);
1603 		goto failed;
1604 	}
1605 
1606 	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1607 	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1608 		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1609 				 MGMT_STATUS_BUSY);
1610 		goto failed;
1611 	}
1612 
1613 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1614 	if (!cmd) {
1615 		err = -ENOMEM;
1616 		goto failed;
1617 	}
1618 
1619 	hci_req_init(&req, hdev);
1620 
1621 	/* If BR/EDR is not enabled and we disable advertising as a
1622 	 * by-product of disabling connectable, we need to update the
1623 	 * advertising flags.
1624 	 */
1625 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1626 		if (!cp->val) {
1627 			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1628 			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1629 		}
1630 		update_adv_data(&req);
1631 	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
1632 		if (cp->val) {
1633 			scan = SCAN_PAGE;
1634 		} else {
1635 			scan = 0;
1636 
1637 			if (test_bit(HCI_ISCAN, &hdev->flags) &&
1638 			    hdev->discov_timeout > 0)
1639 				cancel_delayed_work(&hdev->discov_off);
1640 		}
1641 
1642 		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1643 	}
1644 
1645 	/* If we're going from non-connectable to connectable or
1646 	 * vice-versa when fast connectable is enabled ensure that fast
1647 	 * connectable gets disabled. write_fast_connectable won't do
1648 	 * anything if the page scan parameters are already what they
1649 	 * should be.
1650 	 */
1651 	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1652 		write_fast_connectable(&req, false);
1653 
1654 	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
1655 	    hci_conn_num(hdev, LE_LINK) == 0) {
1656 		disable_advertising(&req);
1657 		enable_advertising(&req);
1658 	}
1659 
1660 	err = hci_req_run(&req, set_connectable_complete);
1661 	if (err < 0) {
1662 		mgmt_pending_remove(cmd);
1663 		if (err == -ENODATA)
1664 			err = set_connectable_update_settings(hdev, sk,
1665 							      cp->val);
1666 		goto failed;
1667 	}
1668 
1669 failed:
1670 	hci_dev_unlock(hdev);
1671 	return err;
1672 }
1673 
1674 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1675 			u16 len)
1676 {
1677 	struct mgmt_mode *cp = data;
1678 	bool changed;
1679 	int err;
1680 
1681 	BT_DBG("request for %s", hdev->name);
1682 
1683 	if (cp->val != 0x00 && cp->val != 0x01)
1684 		return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1685 				  MGMT_STATUS_INVALID_PARAMS);
1686 
1687 	hci_dev_lock(hdev);
1688 
1689 	if (cp->val)
1690 		changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1691 	else
1692 		changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1693 
1694 	err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1695 	if (err < 0)
1696 		goto unlock;
1697 
1698 	if (changed)
1699 		err = new_settings(hdev, sk);
1700 
1701 unlock:
1702 	hci_dev_unlock(hdev);
1703 	return err;
1704 }
1705 
/* Handle the MGMT Set Link Security command (BR/EDR authentication
 * enable). When powered, the setting is written to the controller
 * via Write Auth Enable; otherwise only the stored flag is toggled.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Link security is a BR/EDR only setting. */
	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: just toggle the stored flag, no HCI traffic. */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller state already matches: reply without HCI traffic. */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1775 
/* Handle the MGMT Set Secure Simple Pairing command. When powered,
 * the setting is written to the controller via Write SSP Mode;
 * otherwise only the stored flags are updated. Disabling SSP also
 * clears High Speed support since HS depends on SSP.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* SSP is a BR/EDR feature and needs controller support. */
	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: just update stored flags, no HCI traffic. */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			/* Disabling SSP implies disabling HS as well;
			 * report "changed" if either flag flipped.
			 */
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Requested state already matches: reply right away. */
	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1853 
/* Handle the MGMT Set High Speed command. HS is a purely host-side
 * setting that requires SSP to be enabled; disabling HS while the
 * controller is powered is rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* HS requires SSP to be enabled first. */
	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling HS is only permitted while powered off. */
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1904 
/* hci_request completion callback for set_le(): answer all pending
 * Set LE commands and, on success, refresh the advertising and scan
 * response data.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set LE command with the error. */
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp() held a reference on the first socket. */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_dev_unlock(hdev);
	}
}
1942 
/* Handle the MGMT Set Low Energy command. When an HCI write is
 * needed, Write LE Host Supported is queued and the command
 * completes via le_enable_complete(); otherwise only the stored
 * flags are updated.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* No HCI write needed when powered off or when the host LE
	 * support bit already matches: just adjust stored flags.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Disabling LE also turns off advertising. */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	} else {
		/* Advertising must stop before LE support is turned
		 * off on the controller.
		 */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2031 
2032 /* This is a helper function to test for pending mgmt commands that can
2033  * cause CoD or EIR HCI commands. We can only allow one such pending
2034  * mgmt command at a time since otherwise we cannot easily track what
2035  * the current values are, will be, and based on that calculate if a new
2036  * HCI command needs to be sent and if yes with what value.
2037  */
2038 static bool pending_eir_or_class(struct hci_dev *hdev)
2039 {
2040 	struct pending_cmd *cmd;
2041 
2042 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2043 		switch (cmd->opcode) {
2044 		case MGMT_OP_ADD_UUID:
2045 		case MGMT_OP_REMOVE_UUID:
2046 		case MGMT_OP_SET_DEV_CLASS:
2047 		case MGMT_OP_SET_POWERED:
2048 			return true;
2049 		}
2050 	}
2051 
2052 	return false;
2053 }
2054 
/* The Bluetooth base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order. Short 16-bit and 32-bit UUIDs only differ
 * from this base in bytes 12-15 (see get_uuid_size()).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2059 
2060 static u8 get_uuid_size(const u8 *uuid)
2061 {
2062 	u32 val;
2063 
2064 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2065 		return 128;
2066 
2067 	val = get_unaligned_le32(&uuid[12]);
2068 	if (val > 0xffff)
2069 		return 32;
2070 
2071 	return 16;
2072 }
2073 
2074 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2075 {
2076 	struct pending_cmd *cmd;
2077 
2078 	hci_dev_lock(hdev);
2079 
2080 	cmd = mgmt_pending_find(mgmt_op, hdev);
2081 	if (!cmd)
2082 		goto unlock;
2083 
2084 	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2085 		     hdev->dev_class, 3);
2086 
2087 	mgmt_pending_remove(cmd);
2088 
2089 unlock:
2090 	hci_dev_unlock(hdev);
2091 }
2092 
/* hci_request completion callback for add_uuid(). */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2099 
/* Handle the MGMT Add UUID command: record the UUID in hdev->uuids
 * and refresh class of device and EIR data accordingly. Completes
 * asynchronously via add_uuid_complete() when HCI commands were
 * queued.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending. */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* -ENODATA: nothing to write to the controller, so
		 * complete immediately with the current class.
		 */
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2157 
2158 static bool enable_service_cache(struct hci_dev *hdev)
2159 {
2160 	if (!hdev_is_powered(hdev))
2161 		return false;
2162 
2163 	if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2164 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2165 				   CACHE_TIMEOUT);
2166 		return true;
2167 	}
2168 
2169 	return false;
2170 }
2171 
/* hci_request completion callback for remove_uuid(). */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2178 
/* Handle the MGMT Remove UUID command: delete matching UUIDs (the
 * all-zero UUID removes everything) and refresh class of device and
 * EIR data. Completes asynchronously via remove_uuid_complete() when
 * HCI commands were queued.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending. */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The all-zero wildcard UUID clears the whole list. */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache was (re)started, defer the
		 * CoD/EIR update to the cache flush and answer now.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* -ENODATA: nothing to write to the controller, so
		 * complete immediately with the current class.
		 */
		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2256 
/* hci_request completion callback for set_dev_class(). */
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2263 
/* Handle the MGMT Set Device Class command: store the new major and
 * minor class and, when powered, write the resulting class of device
 * to the controller. Completes via set_class_complete() when HCI
 * commands were queued.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Class of device is a BR/EDR concept only. */
	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending. */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The low two bits of minor and the high three bits of major
	 * are reserved and must be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Powered off: only the stored values needed updating. */
	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* Flush the service cache synchronously (the lock must be
	 * dropped since the work item takes it too) so the EIR update
	 * below reflects the final UUID list.
	 */
	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* -ENODATA: nothing to write to the controller, so
		 * complete immediately with the current class.
		 */
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2334 
2335 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2336 			  u16 len)
2337 {
2338 	struct mgmt_cp_load_link_keys *cp = data;
2339 	u16 key_count, expected_len;
2340 	bool changed;
2341 	int i;
2342 
2343 	BT_DBG("request for %s", hdev->name);
2344 
2345 	if (!lmp_bredr_capable(hdev))
2346 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2347 				  MGMT_STATUS_NOT_SUPPORTED);
2348 
2349 	key_count = __le16_to_cpu(cp->key_count);
2350 
2351 	expected_len = sizeof(*cp) + key_count *
2352 					sizeof(struct mgmt_link_key_info);
2353 	if (expected_len != len) {
2354 		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2355 		       expected_len, len);
2356 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2357 				  MGMT_STATUS_INVALID_PARAMS);
2358 	}
2359 
2360 	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2361 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2362 				  MGMT_STATUS_INVALID_PARAMS);
2363 
2364 	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2365 	       key_count);
2366 
2367 	for (i = 0; i < key_count; i++) {
2368 		struct mgmt_link_key_info *key = &cp->keys[i];
2369 
2370 		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2371 			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2372 					  MGMT_STATUS_INVALID_PARAMS);
2373 	}
2374 
2375 	hci_dev_lock(hdev);
2376 
2377 	hci_link_keys_clear(hdev);
2378 
2379 	if (cp->debug_keys)
2380 		changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2381 	else
2382 		changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2383 
2384 	if (changed)
2385 		new_settings(hdev, NULL);
2386 
2387 	for (i = 0; i < key_count; i++) {
2388 		struct mgmt_link_key_info *key = &cp->keys[i];
2389 
2390 		hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
2391 				 key->type, key->pin_len);
2392 	}
2393 
2394 	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2395 
2396 	hci_dev_unlock(hdev);
2397 
2398 	return 0;
2399 }
2400 
2401 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2402 			   u8 addr_type, struct sock *skip_sk)
2403 {
2404 	struct mgmt_ev_device_unpaired ev;
2405 
2406 	bacpy(&ev.addr.bdaddr, bdaddr);
2407 	ev.addr.type = addr_type;
2408 
2409 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2410 			  skip_sk);
2411 }
2412 
/* Unpair Device mgmt command handler.
 *
 * Removes the stored keys for the given address (link key for BR/EDR;
 * IRK, connection params and LTK for LE).  If requested and a live
 * connection exists it is also disconnected, with the final reply
 * deferred until the disconnect completes.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	/* The reply always echoes the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	/* disconnect is a strict boolean in the mgmt API */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	} else {
		u8 addr_type;

		/* Map mgmt address type to the HCI LE address type */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);

		/* Only the LTK removal result decides pairing status below */
		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	}

	if (err < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
				   MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
		goto unlock;
	}

	/* Look for an active connection only if disconnect was requested */
	if (cp->disconnect) {
		if (cp->addr.type == BDADDR_BREDR)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
						       &cp->addr.bdaddr);
	} else {
		conn = NULL;
	}

	/* Nothing to disconnect: reply now and broadcast the unpair event
	 * to all other mgmt sockets.
	 */
	if (!conn) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
				   &rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Defer the reply until the disconnect completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2503 
/* Disconnect mgmt command handler.
 *
 * Sends an HCI Disconnect for the connection matching the given address
 * and defers the mgmt reply (via a pending command) until the
 * disconnection event arrives.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The reply always echoes the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only one Disconnect command may be pending per controller */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* BT_OPEN/BT_CLOSED connections have no link to tear down */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2568 
2569 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2570 {
2571 	switch (link_type) {
2572 	case LE_LINK:
2573 		switch (addr_type) {
2574 		case ADDR_LE_DEV_PUBLIC:
2575 			return BDADDR_LE_PUBLIC;
2576 
2577 		default:
2578 			/* Fallback to LE Random address type */
2579 			return BDADDR_LE_RANDOM;
2580 		}
2581 
2582 	default:
2583 		/* Fallback to BR/EDR type */
2584 		return BDADDR_BREDR;
2585 	}
2586 }
2587 
2588 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2589 			   u16 data_len)
2590 {
2591 	struct mgmt_rp_get_connections *rp;
2592 	struct hci_conn *c;
2593 	size_t rp_len;
2594 	int err;
2595 	u16 i;
2596 
2597 	BT_DBG("");
2598 
2599 	hci_dev_lock(hdev);
2600 
2601 	if (!hdev_is_powered(hdev)) {
2602 		err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2603 				 MGMT_STATUS_NOT_POWERED);
2604 		goto unlock;
2605 	}
2606 
2607 	i = 0;
2608 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2609 		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2610 			i++;
2611 	}
2612 
2613 	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2614 	rp = kmalloc(rp_len, GFP_KERNEL);
2615 	if (!rp) {
2616 		err = -ENOMEM;
2617 		goto unlock;
2618 	}
2619 
2620 	i = 0;
2621 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2622 		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2623 			continue;
2624 		bacpy(&rp->addr[i].bdaddr, &c->dst);
2625 		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2626 		if (c->type == SCO_LINK || c->type == ESCO_LINK)
2627 			continue;
2628 		i++;
2629 	}
2630 
2631 	rp->conn_count = cpu_to_le16(i);
2632 
2633 	/* Recalculate length in case of filtered SCO connections, etc */
2634 	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2635 
2636 	err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2637 			   rp_len);
2638 
2639 	kfree(rp);
2640 
2641 unlock:
2642 	hci_dev_unlock(hdev);
2643 	return err;
2644 }
2645 
2646 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2647 				   struct mgmt_cp_pin_code_neg_reply *cp)
2648 {
2649 	struct pending_cmd *cmd;
2650 	int err;
2651 
2652 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2653 			       sizeof(*cp));
2654 	if (!cmd)
2655 		return -ENOMEM;
2656 
2657 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2658 			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2659 	if (err < 0)
2660 		mgmt_pending_remove(cmd);
2661 
2662 	return err;
2663 }
2664 
/* PIN Code Reply mgmt command handler.
 *
 * Forwards the user-supplied PIN to the controller for the matching
 * ACL connection.  If high security is pending and the PIN is not a
 * full 16 bytes, a negative reply is sent instead and the command is
 * rejected with Invalid Params.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a 16-digit PIN; anything shorter gets a
	 * negative reply towards the remote instead.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		/* Only report Invalid Params if the neg reply was queued */
		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2724 
2725 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2726 			     u16 len)
2727 {
2728 	struct mgmt_cp_set_io_capability *cp = data;
2729 
2730 	BT_DBG("");
2731 
2732 	hci_dev_lock(hdev);
2733 
2734 	hdev->io_capability = cp->io_capability;
2735 
2736 	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2737 	       hdev->io_capability);
2738 
2739 	hci_dev_unlock(hdev);
2740 
2741 	return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2742 			    0);
2743 }
2744 
2745 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2746 {
2747 	struct hci_dev *hdev = conn->hdev;
2748 	struct pending_cmd *cmd;
2749 
2750 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2751 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2752 			continue;
2753 
2754 		if (cmd->user_data != conn)
2755 			continue;
2756 
2757 		return cmd;
2758 	}
2759 
2760 	return NULL;
2761 }
2762 
/* Finish a pending Pair Device command: reply to the issuer, detach the
 * pairing callbacks from the connection, drop the connection reference
 * taken when pairing started, and free the pending command.
 */
static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
2783 
2784 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2785 {
2786 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2787 	struct pending_cmd *cmd;
2788 
2789 	cmd = find_pairing(conn);
2790 	if (cmd)
2791 		pairing_complete(cmd, status);
2792 }
2793 
2794 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2795 {
2796 	struct pending_cmd *cmd;
2797 
2798 	BT_DBG("status %u", status);
2799 
2800 	cmd = find_pairing(conn);
2801 	if (!cmd)
2802 		BT_DBG("Unable to find a pending command");
2803 	else
2804 		pairing_complete(cmd, mgmt_status(status));
2805 }
2806 
2807 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2808 {
2809 	struct pending_cmd *cmd;
2810 
2811 	BT_DBG("status %u", status);
2812 
2813 	if (!status)
2814 		return;
2815 
2816 	cmd = find_pairing(conn);
2817 	if (!cmd)
2818 		BT_DBG("Unable to find a pending command");
2819 	else
2820 		pairing_complete(cmd, mgmt_status(status));
2821 }
2822 
/* Pair Device mgmt command handler.
 *
 * Initiates an ACL or LE connection with dedicated-bonding auth and
 * medium security, wires up completion callbacks on the connection and
 * defers the mgmt reply until pairing finishes (or fails).
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The reply always echoes the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, auth_type);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* A non-NULL connect_cfm_cb means someone else already owns the
	 * connection callbacks, so pairing cannot proceed.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	/* Already connected and secure enough: finish immediately */
	if (conn->state == BT_CONNECTED &&
	    hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2924 
/* Cancel Pair Device mgmt command handler.
 *
 * Aborts the outstanding Pair Device command for the given address by
 * completing it with a Cancelled status, then acknowledges the cancel
 * request itself.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The supplied address must match the pairing in progress */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
2966 
/* Common handler for all user pairing responses (PIN neg reply, user
 * confirm/passkey replies and their negatives).
 *
 * LE pairing responses are routed through SMP and answered right away;
 * BR/EDR responses are forwarded as the given HCI command with the mgmt
 * reply deferred via a pending command.  @passkey is only used for
 * HCI_OP_USER_PASSKEY_REPLY.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		/* Continue with pairing via SMP */
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);

		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		/* All other replies carry only the remote address */
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3036 
/* PIN Code Negative Reply mgmt command: thin wrapper around the common
 * user pairing response handler (no passkey needed).
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3048 
/* User Confirmation Reply mgmt command: validates the fixed command
 * length, then delegates to the common user pairing response handler.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	if (len != sizeof(*cp))
		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				  MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3064 
/* User Confirmation Negative Reply mgmt command: thin wrapper around
 * the common user pairing response handler.
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3076 
/* User Passkey Reply mgmt command: thin wrapper that forwards the
 * user-entered passkey through the common pairing response handler.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3088 
/* User Passkey Negative Reply mgmt command: thin wrapper around the
 * common user pairing response handler.
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3100 
3101 static void update_name(struct hci_request *req)
3102 {
3103 	struct hci_dev *hdev = req->hdev;
3104 	struct hci_cp_write_local_name cp;
3105 
3106 	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3107 
3108 	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
3109 }
3110 
/* Completion callback for the Set Local Name HCI request: answers the
 * pending mgmt command with either an error status or the name data it
 * originally carried.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The pending command may already be gone (e.g. socket closed) */
	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3138 
/* Set Local Name mgmt command handler.
 *
 * Updates short_name and dev_name.  When powered off only the stored
 * values change and a Local Name Changed event is broadcast; when
 * powered on, the name is written to the controller (and to EIR /
 * scan response data as applicable) with the reply deferred.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	/* short_name is never sent to the controller, so it can be stored
	 * immediately in both the powered and unpowered case.
	 */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		/* Notify all other mgmt sockets about the new name */
		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to udpate the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3207 
/* Read Local OOB Data mgmt command handler.
 *
 * Requests the local OOB pairing data from the controller, using the
 * extended variant when Secure Connections is enabled.  The mgmt reply
 * is deferred until the HCI command completes.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB data requires Secure Simple Pairing support */
	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one Read Local OOB Data may be pending at a time */
	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* With Secure Connections the extended command also returns the
	 * P-256 hash and randomizer.
	 */
	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3255 
3256 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3257 			       void *data, u16 len)
3258 {
3259 	int err;
3260 
3261 	BT_DBG("%s ", hdev->name);
3262 
3263 	hci_dev_lock(hdev);
3264 
3265 	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3266 		struct mgmt_cp_add_remote_oob_data *cp = data;
3267 		u8 status;
3268 
3269 		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3270 					      cp->hash, cp->randomizer);
3271 		if (err < 0)
3272 			status = MGMT_STATUS_FAILED;
3273 		else
3274 			status = MGMT_STATUS_SUCCESS;
3275 
3276 		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3277 				   status, &cp->addr, sizeof(cp->addr));
3278 	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3279 		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3280 		u8 status;
3281 
3282 		err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
3283 						  cp->hash192,
3284 						  cp->randomizer192,
3285 						  cp->hash256,
3286 						  cp->randomizer256);
3287 		if (err < 0)
3288 			status = MGMT_STATUS_FAILED;
3289 		else
3290 			status = MGMT_STATUS_SUCCESS;
3291 
3292 		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3293 				   status, &cp->addr, sizeof(cp->addr));
3294 	} else {
3295 		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3296 		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3297 				 MGMT_STATUS_INVALID_PARAMS);
3298 	}
3299 
3300 	hci_dev_unlock(hdev);
3301 	return err;
3302 }
3303 
3304 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3305 				  void *data, u16 len)
3306 {
3307 	struct mgmt_cp_remove_remote_oob_data *cp = data;
3308 	u8 status;
3309 	int err;
3310 
3311 	BT_DBG("%s", hdev->name);
3312 
3313 	hci_dev_lock(hdev);
3314 
3315 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3316 	if (err < 0)
3317 		status = MGMT_STATUS_INVALID_PARAMS;
3318 	else
3319 		status = MGMT_STATUS_SUCCESS;
3320 
3321 	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3322 			   status, &cp->addr, sizeof(cp->addr));
3323 
3324 	hci_dev_unlock(hdev);
3325 	return err;
3326 }
3327 
3328 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3329 {
3330 	struct pending_cmd *cmd;
3331 	u8 type;
3332 	int err;
3333 
3334 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3335 
3336 	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3337 	if (!cmd)
3338 		return -ENOENT;
3339 
3340 	type = hdev->discovery.type;
3341 
3342 	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3343 			   &type, sizeof(type));
3344 	mgmt_pending_remove(cmd);
3345 
3346 	return err;
3347 }
3348 
/* Completion callback for the Start Discovery HCI request.
 *
 * On failure the pending mgmt command is failed; on success the
 * discovery state moves to FINDING and, for LE-based discovery types,
 * the LE scan disable work is scheduled to bound the scan duration.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	unsigned long timeout = 0;

	BT_DBG("status %d", status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;

	/* BR/EDR inquiry ends on its own, so no LE scan timeout needed */
	case DISCOV_TYPE_BREDR:
		break;

	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
	}

	if (!timeout)
		return;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
}
3387 
/* Handle MGMT_OP_START_DISCOVERY: validate the requested discovery type,
 * register a pending command and queue the necessary HCI commands
 * (inquiry for BR/EDR, scan parameter + scan enable for LE). The mgmt
 * response is sent later from start_discovery_complete().
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status, own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Periodic inquiry and regular discovery are mutually exclusive */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Only one discovery procedure may run at a time */
	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Start with an empty cache so only fresh results are
		 * reported for this discovery session.
		 */
		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Interleaved discovery additionally requires BR/EDR */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Scanning while advertising is not allowed here */
		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_REJECTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or unresolvable private address.
		 */
		err = hci_update_random_address(&req, true, &own_addr_type);
		if (err < 0) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_FAILED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3536 
3537 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3538 {
3539 	struct pending_cmd *cmd;
3540 	int err;
3541 
3542 	cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3543 	if (!cmd)
3544 		return -ENOENT;
3545 
3546 	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3547 			   &hdev->discovery.type, sizeof(hdev->discovery.type));
3548 	mgmt_pending_remove(cmd);
3549 
3550 	return err;
3551 }
3552 
3553 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3554 {
3555 	BT_DBG("status %d", status);
3556 
3557 	hci_dev_lock(hdev);
3558 
3559 	if (status) {
3560 		mgmt_stop_discovery_failed(hdev, status);
3561 		goto unlock;
3562 	}
3563 
3564 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3565 
3566 unlock:
3567 	hci_dev_unlock(hdev);
3568 }
3569 
/* Handle MGMT_OP_STOP_DISCOVERY: abort an active discovery procedure.
 * Depending on the current discovery state this cancels the inquiry,
 * disables the LE scan, or cancels an outstanding remote name request.
 * The mgmt response is sent from stop_discovery_complete() once the
 * queued HCI commands finish.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the type that was started */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		/* HCI_INQUIRY set means a BR/EDR inquiry is in progress;
		 * otherwise an LE scan must be running.
		 */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			cancel_delayed_work(&hdev->le_scan_disable);

			hci_req_add_le_scan_disable(&req);
		}

		break;

	case DISCOVERY_RESOLVING:
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e) {
			/* No name request in flight; discovery can be
			 * stopped immediately without any HCI traffic.
			 */
			mgmt_pending_remove(cmd);
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_STOP_DISCOVERY, 0,
					   &mgmt_cp->type,
					   sizeof(mgmt_cp->type));
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			goto unlock;
		}

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		break;

	default:
		BT_DBG("unknown discovery state %u", hdev->discovery.state);

		mgmt_pending_remove(cmd);
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_FAILED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	err = hci_req_run(&req, stop_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3657 
3658 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3659 			u16 len)
3660 {
3661 	struct mgmt_cp_confirm_name *cp = data;
3662 	struct inquiry_entry *e;
3663 	int err;
3664 
3665 	BT_DBG("%s", hdev->name);
3666 
3667 	hci_dev_lock(hdev);
3668 
3669 	if (!hci_discovery_active(hdev)) {
3670 		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3671 				   MGMT_STATUS_FAILED, &cp->addr,
3672 				   sizeof(cp->addr));
3673 		goto failed;
3674 	}
3675 
3676 	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3677 	if (!e) {
3678 		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3679 				   MGMT_STATUS_INVALID_PARAMS, &cp->addr,
3680 				   sizeof(cp->addr));
3681 		goto failed;
3682 	}
3683 
3684 	if (cp->name_known) {
3685 		e->name_state = NAME_KNOWN;
3686 		list_del(&e->list);
3687 	} else {
3688 		e->name_state = NAME_NEEDED;
3689 		hci_inquiry_cache_update_resolve(hdev, e);
3690 	}
3691 
3692 	err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3693 			   sizeof(cp->addr));
3694 
3695 failed:
3696 	hci_dev_unlock(hdev);
3697 	return err;
3698 }
3699 
3700 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3701 			u16 len)
3702 {
3703 	struct mgmt_cp_block_device *cp = data;
3704 	u8 status;
3705 	int err;
3706 
3707 	BT_DBG("%s", hdev->name);
3708 
3709 	if (!bdaddr_type_is_valid(cp->addr.type))
3710 		return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3711 				    MGMT_STATUS_INVALID_PARAMS,
3712 				    &cp->addr, sizeof(cp->addr));
3713 
3714 	hci_dev_lock(hdev);
3715 
3716 	err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3717 	if (err < 0)
3718 		status = MGMT_STATUS_FAILED;
3719 	else
3720 		status = MGMT_STATUS_SUCCESS;
3721 
3722 	err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3723 			   &cp->addr, sizeof(cp->addr));
3724 
3725 	hci_dev_unlock(hdev);
3726 
3727 	return err;
3728 }
3729 
3730 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3731 			  u16 len)
3732 {
3733 	struct mgmt_cp_unblock_device *cp = data;
3734 	u8 status;
3735 	int err;
3736 
3737 	BT_DBG("%s", hdev->name);
3738 
3739 	if (!bdaddr_type_is_valid(cp->addr.type))
3740 		return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3741 				    MGMT_STATUS_INVALID_PARAMS,
3742 				    &cp->addr, sizeof(cp->addr));
3743 
3744 	hci_dev_lock(hdev);
3745 
3746 	err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3747 	if (err < 0)
3748 		status = MGMT_STATUS_INVALID_PARAMS;
3749 	else
3750 		status = MGMT_STATUS_SUCCESS;
3751 
3752 	err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3753 			   &cp->addr, sizeof(cp->addr));
3754 
3755 	hci_dev_unlock(hdev);
3756 
3757 	return err;
3758 }
3759 
3760 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3761 			 u16 len)
3762 {
3763 	struct mgmt_cp_set_device_id *cp = data;
3764 	struct hci_request req;
3765 	int err;
3766 	__u16 source;
3767 
3768 	BT_DBG("%s", hdev->name);
3769 
3770 	source = __le16_to_cpu(cp->source);
3771 
3772 	if (source > 0x0002)
3773 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3774 				  MGMT_STATUS_INVALID_PARAMS);
3775 
3776 	hci_dev_lock(hdev);
3777 
3778 	hdev->devid_source = source;
3779 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3780 	hdev->devid_product = __le16_to_cpu(cp->product);
3781 	hdev->devid_version = __le16_to_cpu(cp->version);
3782 
3783 	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3784 
3785 	hci_req_init(&req, hdev);
3786 	update_eir(&req);
3787 	hci_req_run(&req, NULL);
3788 
3789 	hci_dev_unlock(hdev);
3790 
3791 	return err;
3792 }
3793 
3794 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3795 {
3796 	struct cmd_lookup match = { NULL, hdev };
3797 
3798 	if (status) {
3799 		u8 mgmt_err = mgmt_status(status);
3800 
3801 		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3802 				     cmd_status_rsp, &mgmt_err);
3803 		return;
3804 	}
3805 
3806 	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3807 			     &match);
3808 
3809 	new_settings(hdev, match.sk);
3810 
3811 	if (match.sk)
3812 		sock_put(match.sk);
3813 }
3814 
/* Handle MGMT_OP_SET_ADVERTISING: toggle LE advertising. When no HCI
 * traffic is needed (powered off, no change, or an LE connection
 * exists) the flag is toggled directly and the response sent inline;
 * otherwise the advertising commands are queued and answered from
 * set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Reject if another advertising or LE toggle is in flight */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3892 
3893 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
3894 			      void *data, u16 len)
3895 {
3896 	struct mgmt_cp_set_static_address *cp = data;
3897 	int err;
3898 
3899 	BT_DBG("%s", hdev->name);
3900 
3901 	if (!lmp_le_capable(hdev))
3902 		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3903 				  MGMT_STATUS_NOT_SUPPORTED);
3904 
3905 	if (hdev_is_powered(hdev))
3906 		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3907 				  MGMT_STATUS_REJECTED);
3908 
3909 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
3910 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
3911 			return cmd_status(sk, hdev->id,
3912 					  MGMT_OP_SET_STATIC_ADDRESS,
3913 					  MGMT_STATUS_INVALID_PARAMS);
3914 
3915 		/* Two most significant bits shall be set */
3916 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
3917 			return cmd_status(sk, hdev->id,
3918 					  MGMT_OP_SET_STATIC_ADDRESS,
3919 					  MGMT_STATUS_INVALID_PARAMS);
3920 	}
3921 
3922 	hci_dev_lock(hdev);
3923 
3924 	bacpy(&hdev->static_addr, &cp->bdaddr);
3925 
3926 	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
3927 
3928 	hci_dev_unlock(hdev);
3929 
3930 	return err;
3931 }
3932 
/* Handle MGMT_OP_SET_SCAN_PARAMS: update the LE scan interval/window
 * used for passive background scanning and restart a running background
 * scan so the new values take effect immediately.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	/* 0x0004 - 0x4000 is the valid range for LE scan interval */
	if (interval < 0x0004 || interval > 0x4000)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	/* Same valid range applies to the scan window */
	if (window < 0x0004 || window > 0x4000)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	/* The window must not exceed the interval */
	if (window > interval)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}
3988 
3989 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
3990 {
3991 	struct pending_cmd *cmd;
3992 
3993 	BT_DBG("status 0x%02x", status);
3994 
3995 	hci_dev_lock(hdev);
3996 
3997 	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3998 	if (!cmd)
3999 		goto unlock;
4000 
4001 	if (status) {
4002 		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4003 			   mgmt_status(status));
4004 	} else {
4005 		struct mgmt_mode *cp = cmd->param;
4006 
4007 		if (cp->val)
4008 			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4009 		else
4010 			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4011 
4012 		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4013 		new_settings(hdev, cmd->sk);
4014 	}
4015 
4016 	mgmt_pending_remove(cmd);
4017 
4018 unlock:
4019 	hci_dev_unlock(hdev);
4020 }
4021 
/* Handle MGMT_OP_SET_FAST_CONNECTABLE: switch the page scan parameters
 * between normal and fast-connectable mode. Requires BR/EDR enabled, a
 * powered controller (>= 1.2) and connectable mode. The flag itself is
 * only committed from fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* Only one fast-connectable change may be in flight */
	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* No change requested: answer with current settings directly */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4086 
4087 static void set_bredr_scan(struct hci_request *req)
4088 {
4089 	struct hci_dev *hdev = req->hdev;
4090 	u8 scan = 0;
4091 
4092 	/* Ensure that fast connectable is disabled. This function will
4093 	 * not do anything if the page scan parameters are already what
4094 	 * they should be.
4095 	 */
4096 	write_fast_connectable(req, false);
4097 
4098 	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4099 		scan |= SCAN_PAGE;
4100 	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4101 		scan |= SCAN_INQUIRY;
4102 
4103 	if (scan)
4104 		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
4105 }
4106 
4107 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
4108 {
4109 	struct pending_cmd *cmd;
4110 
4111 	BT_DBG("status 0x%02x", status);
4112 
4113 	hci_dev_lock(hdev);
4114 
4115 	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4116 	if (!cmd)
4117 		goto unlock;
4118 
4119 	if (status) {
4120 		u8 mgmt_err = mgmt_status(status);
4121 
4122 		/* We need to restore the flag if related HCI commands
4123 		 * failed.
4124 		 */
4125 		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4126 
4127 		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4128 	} else {
4129 		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4130 		new_settings(hdev, cmd->sk);
4131 	}
4132 
4133 	mgmt_pending_remove(cmd);
4134 
4135 unlock:
4136 	hci_dev_unlock(hdev);
4137 }
4138 
/* Handle MGMT_OP_SET_BREDR: enable or disable BR/EDR support on a dual
 * mode controller. Disabling is only permitted while powered off; when
 * powered on, enabling flips the flag immediately (so the advertising
 * data reflects it) and queues the scan and advertising data updates.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Only meaningful on dual-mode (BR/EDR + LE) controllers */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* Toggling BR/EDR requires LE to stay enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: answer with current settings directly */
	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR clears all BR/EDR-only settings */
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		set_bredr_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4228 
/* Handle MGMT_OP_SET_SECURE_CONN: enable/disable Secure Connections
 * support. Value 0x02 additionally requests SC-only mode. While powered
 * off the flags are toggled directly; otherwise the Write Secure
 * Connections Host Support HCI command is sent and the flags updated
 * once it has been issued.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  status);

	/* HCI_FORCE_SC allows testing on controllers without SC support */
	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* No change in either SC_ENABLED or SC_ONLY: reply directly */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4316 
4317 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4318 			  void *data, u16 len)
4319 {
4320 	struct mgmt_mode *cp = data;
4321 	bool changed;
4322 	int err;
4323 
4324 	BT_DBG("request for %s", hdev->name);
4325 
4326 	if (cp->val != 0x00 && cp->val != 0x01)
4327 		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4328 				  MGMT_STATUS_INVALID_PARAMS);
4329 
4330 	hci_dev_lock(hdev);
4331 
4332 	if (cp->val)
4333 		changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4334 	else
4335 		changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4336 
4337 	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4338 	if (err < 0)
4339 		goto unlock;
4340 
4341 	if (changed)
4342 		err = new_settings(hdev, sk);
4343 
4344 unlock:
4345 	hci_dev_unlock(hdev);
4346 	return err;
4347 }
4348 
/* Handle MGMT_OP_SET_PRIVACY: enable or disable LE privacy. Enabling
 * stores the local IRK and forces a fresh resolvable private address;
 * disabling wipes the IRK. Only allowed while powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	if (cp->privacy) {
		changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a new RPA on next power on */
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4398 
4399 static bool irk_is_valid(struct mgmt_irk_info *irk)
4400 {
4401 	switch (irk->addr.type) {
4402 	case BDADDR_LE_PUBLIC:
4403 		return true;
4404 
4405 	case BDADDR_LE_RANDOM:
4406 		/* Two most significant bits shall be set */
4407 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4408 			return false;
4409 		return true;
4410 	}
4411 
4412 	return false;
4413 }
4414 
4415 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4416 		     u16 len)
4417 {
4418 	struct mgmt_cp_load_irks *cp = cp_data;
4419 	u16 irk_count, expected_len;
4420 	int i, err;
4421 
4422 	BT_DBG("request for %s", hdev->name);
4423 
4424 	if (!lmp_le_capable(hdev))
4425 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4426 				  MGMT_STATUS_NOT_SUPPORTED);
4427 
4428 	irk_count = __le16_to_cpu(cp->irk_count);
4429 
4430 	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4431 	if (expected_len != len) {
4432 		BT_ERR("load_irks: expected %u bytes, got %u bytes",
4433 		       expected_len, len);
4434 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4435 				  MGMT_STATUS_INVALID_PARAMS);
4436 	}
4437 
4438 	BT_DBG("%s irk_count %u", hdev->name, irk_count);
4439 
4440 	for (i = 0; i < irk_count; i++) {
4441 		struct mgmt_irk_info *key = &cp->irks[i];
4442 
4443 		if (!irk_is_valid(key))
4444 			return cmd_status(sk, hdev->id,
4445 					  MGMT_OP_LOAD_IRKS,
4446 					  MGMT_STATUS_INVALID_PARAMS);
4447 	}
4448 
4449 	hci_dev_lock(hdev);
4450 
4451 	hci_smp_irks_clear(hdev);
4452 
4453 	for (i = 0; i < irk_count; i++) {
4454 		struct mgmt_irk_info *irk = &cp->irks[i];
4455 		u8 addr_type;
4456 
4457 		if (irk->addr.type == BDADDR_LE_PUBLIC)
4458 			addr_type = ADDR_LE_DEV_PUBLIC;
4459 		else
4460 			addr_type = ADDR_LE_DEV_RANDOM;
4461 
4462 		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4463 			    BDADDR_ANY);
4464 	}
4465 
4466 	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4467 
4468 	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4469 
4470 	hci_dev_unlock(hdev);
4471 
4472 	return err;
4473 }
4474 
4475 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4476 {
4477 	if (key->master != 0x00 && key->master != 0x01)
4478 		return false;
4479 
4480 	switch (key->addr.type) {
4481 	case BDADDR_LE_PUBLIC:
4482 		return true;
4483 
4484 	case BDADDR_LE_RANDOM:
4485 		/* Two most significant bits shall be set */
4486 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4487 			return false;
4488 		return true;
4489 	}
4490 
4491 	return false;
4492 }
4493 
4494 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4495 			       void *cp_data, u16 len)
4496 {
4497 	struct mgmt_cp_load_long_term_keys *cp = cp_data;
4498 	u16 key_count, expected_len;
4499 	int i, err;
4500 
4501 	BT_DBG("request for %s", hdev->name);
4502 
4503 	if (!lmp_le_capable(hdev))
4504 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4505 				  MGMT_STATUS_NOT_SUPPORTED);
4506 
4507 	key_count = __le16_to_cpu(cp->key_count);
4508 
4509 	expected_len = sizeof(*cp) + key_count *
4510 					sizeof(struct mgmt_ltk_info);
4511 	if (expected_len != len) {
4512 		BT_ERR("load_keys: expected %u bytes, got %u bytes",
4513 		       expected_len, len);
4514 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4515 				  MGMT_STATUS_INVALID_PARAMS);
4516 	}
4517 
4518 	BT_DBG("%s key_count %u", hdev->name, key_count);
4519 
4520 	for (i = 0; i < key_count; i++) {
4521 		struct mgmt_ltk_info *key = &cp->keys[i];
4522 
4523 		if (!ltk_is_valid(key))
4524 			return cmd_status(sk, hdev->id,
4525 					  MGMT_OP_LOAD_LONG_TERM_KEYS,
4526 					  MGMT_STATUS_INVALID_PARAMS);
4527 	}
4528 
4529 	hci_dev_lock(hdev);
4530 
4531 	hci_smp_ltks_clear(hdev);
4532 
4533 	for (i = 0; i < key_count; i++) {
4534 		struct mgmt_ltk_info *key = &cp->keys[i];
4535 		u8 type, addr_type;
4536 
4537 		if (key->addr.type == BDADDR_LE_PUBLIC)
4538 			addr_type = ADDR_LE_DEV_PUBLIC;
4539 		else
4540 			addr_type = ADDR_LE_DEV_RANDOM;
4541 
4542 		if (key->master)
4543 			type = HCI_SMP_LTK;
4544 		else
4545 			type = HCI_SMP_LTK_SLAVE;
4546 
4547 		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4548 			    key->type, key->val, key->enc_size, key->ediv,
4549 			    key->rand);
4550 	}
4551 
4552 	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4553 			   NULL, 0);
4554 
4555 	hci_dev_unlock(hdev);
4556 
4557 	return err;
4558 }
4559 
/* Dispatch table for mgmt commands, indexed directly by opcode in
 * mgmt_control(). The entry order is therefore load-bearing and must
 * match the MGMT_OP_* numbering exactly.
 *
 * data_len is the expected parameter length. For var_len entries it
 * is the minimum length (commands carrying trailing variable-size
 * data such as key lists); for all others it must match exactly.
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_pairable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
	{ set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
};
4616 
4617 
/* Entry point for management commands arriving on a mgmt socket.
 *
 * Copies the message out of the iovec, parses the mgmt_hdr, resolves
 * the controller index, validates the opcode and parameter length
 * against mgmt_handlers[], and dispatches to the per-command handler.
 *
 * Returns msglen on success or a negative errno. Command-level
 * failures are reported to user space via cmd_status() and the
 * result of that write is returned instead.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header length field must account for exactly the
	 * remaining payload.
	 */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup or bound to a user
		 * channel are not reachable through the management
		 * interface.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Opcodes below MGMT_OP_READ_INFO are global and must not
	 * carry a controller index; all others require one.
	 */
	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	/* Fixed-size commands must match data_len exactly; variable
	 * length commands must provide at least data_len bytes.
	 */
	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
4710 
4711 void mgmt_index_added(struct hci_dev *hdev)
4712 {
4713 	if (hdev->dev_type != HCI_BREDR)
4714 		return;
4715 
4716 	mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
4717 }
4718 
4719 void mgmt_index_removed(struct hci_dev *hdev)
4720 {
4721 	u8 status = MGMT_STATUS_INVALID_INDEX;
4722 
4723 	if (hdev->dev_type != HCI_BREDR)
4724 		return;
4725 
4726 	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4727 
4728 	mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
4729 }
4730 
4731 /* This function requires the caller holds hdev->lock */
4732 static void restart_le_auto_conns(struct hci_dev *hdev)
4733 {
4734 	struct hci_conn_params *p;
4735 
4736 	list_for_each_entry(p, &hdev->le_conn_params, list) {
4737 		if (p->auto_connect == HCI_AUTO_CONN_ALWAYS)
4738 			hci_pend_le_conn_add(hdev, &p->addr, p->addr_type);
4739 	}
4740 }
4741 
/* Completion callback for the HCI request built by
 * powered_update_hci(): finish pending Set Powered commands, restart
 * LE auto-connections and emit a New Settings event.
 */
static void powered_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	restart_le_auto_conns(hdev);

	/* Complete all pending Set Powered commands; match.sk is
	 * presumably set (with a reference) by settings_rsp for one
	 * of the requesting sockets -- see settings_rsp for details.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	/* Drop the socket reference recorded in match.sk, if any. */
	if (match.sk)
		sock_put(match.sk);
}
4761 
/* Build and run the HCI request that brings a freshly powered-on
 * controller in line with the current mgmt settings: SSP, LE host
 * support, advertising data, link security, scan mode, class, name
 * and EIR.
 *
 * Returns the hci_req_run() result; 0 means commands were queued and
 * powered_complete() will be called when they finish.
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Turn on SSP in the controller if mgmt wants it enabled but
	 * the controller does not have it enabled yet.
	 */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	/* Sync the controller's authentication-enable state with the
	 * mgmt link security setting.
	 */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
			set_bredr_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
4821 
/* Handle a controller power state change reported by the HCI core.
 *
 * On power on, first try to program the controller via
 * powered_update_hci(); if commands were queued (return 0) the
 * pending Set Powered commands are completed later from
 * powered_complete(). Otherwise, and on power off, complete the
 * pending commands here and emit a New Settings event.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	/* Fail every other still-pending command with Not Powered. */
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	/* Powering off clears the class of device; announce that if
	 * it was previously non-zero.
	 */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	/* match.sk is presumably referenced by settings_rsp; drop it. */
	if (match.sk)
		sock_put(match.sk);

	return err;
}
4856 
4857 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
4858 {
4859 	struct pending_cmd *cmd;
4860 	u8 status;
4861 
4862 	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
4863 	if (!cmd)
4864 		return;
4865 
4866 	if (err == -ERFKILL)
4867 		status = MGMT_STATUS_RFKILLED;
4868 	else
4869 		status = MGMT_STATUS_FAILED;
4870 
4871 	cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
4872 
4873 	mgmt_pending_remove(cmd);
4874 }
4875 
/* Handler for the discoverable timeout: clear the discoverable
 * flags, restore plain page scan for BR/EDR, refresh the class of
 * device and advertising data, and notify user space.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		/* Drop inquiry scan but keep page scan enabled. */
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
4906 
/* Mirror a controller-side discoverable state change into the mgmt
 * flags and notify user space when the setting actually changed.
 */
void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
{
	bool changed;

	/* Nothing needed here if there's a pending command since that
	 * commands request completion callback takes care of everything
	 * necessary.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	/* Powering off may clear the scan mode - don't let that interfere */
	if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	if (discoverable) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	} else {
		/* Limited discoverable implies discoverable; clear it
		 * unconditionally when leaving discoverable mode.
		 */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (changed) {
		struct hci_request req;

		/* In case this change in discoverable was triggered by
		 * a disabling of connectable there could be a need to
		 * update the advertising flags.
		 */
		hci_req_init(&req, hdev);
		update_adv_data(&req);
		hci_req_run(&req, NULL);

		new_settings(hdev, NULL);
	}
}
4943 
4944 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
4945 {
4946 	bool changed;
4947 
4948 	/* Nothing needed here if there's a pending command since that
4949 	 * commands request completion callback takes care of everything
4950 	 * necessary.
4951 	 */
4952 	if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
4953 		return;
4954 
4955 	/* Powering off may clear the scan mode - don't let that interfere */
4956 	if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
4957 		return;
4958 
4959 	if (connectable)
4960 		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4961 	else
4962 		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4963 
4964 	if (changed)
4965 		new_settings(hdev, NULL);
4966 }
4967 
4968 void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
4969 {
4970 	/* Powering off may stop advertising - don't let that interfere */
4971 	if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
4972 		return;
4973 
4974 	if (advertising)
4975 		set_bit(HCI_ADVERTISING, &hdev->dev_flags);
4976 	else
4977 		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
4978 }
4979 
4980 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
4981 {
4982 	u8 mgmt_err = mgmt_status(status);
4983 
4984 	if (scan & SCAN_PAGE)
4985 		mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
4986 				     cmd_status_rsp, &mgmt_err);
4987 
4988 	if (scan & SCAN_INQUIRY)
4989 		mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
4990 				     cmd_status_rsp, &mgmt_err);
4991 }
4992 
4993 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
4994 		       bool persistent)
4995 {
4996 	struct mgmt_ev_new_link_key ev;
4997 
4998 	memset(&ev, 0, sizeof(ev));
4999 
5000 	ev.store_hint = persistent;
5001 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
5002 	ev.key.addr.type = BDADDR_BREDR;
5003 	ev.key.type = key->type;
5004 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
5005 	ev.key.pin_len = key->pin_len;
5006 
5007 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
5008 }
5009 
/* Emit a New Long Term Key event with a store hint for user space. */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = key->authenticated;
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* HCI_SMP_LTK is the master-role key type. */
	if (key->type == HCI_SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
5047 
/* Emit a New IRK event with a store hint for user space. */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	/* For identity resolving keys from devices that are already
	 * using a public address or static random address, do not
	 * ask for storing this key. The identity resolving key really
	 * is only mandatory for devices using resolvable random
	 * addresses.
	 *
	 * Storing all identity resolving keys has the downside that
	 * they will be also loaded on next boot of the system. More
	 * identity resolving keys, means more time during scanning is
	 * needed to actually resolve these addresses.
	 */
	if (bacmp(&irk->rpa, BDADDR_ANY))
		ev.store_hint = 0x01;
	else
		ev.store_hint = 0x00;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
5077 
/* Emit a New CSRK event with a store hint for user space. */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.master = csrk->master;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
5107 
/* Append one EIR field (length byte, type byte, data) at offset
 * eir_len and return the new length.
 *
 * NOTE(review): no bounds checking is performed here; the caller must
 * guarantee the buffer has at least data_len + 2 bytes of room left.
 */
static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
				  u8 data_len)
{
	/* Field length covers the type byte plus the payload. */
	eir[eir_len++] = sizeof(type) + data_len;
	eir[eir_len++] = type;
	memcpy(&eir[eir_len], data, data_len);
	eir_len += data_len;

	return eir_len;
}
5118 
/* Emit a Device Connected event, including optional EIR fields for
 * the remote name and class of device.
 *
 * NOTE(review): the event is built in a 512-byte stack buffer with no
 * explicit bounds check in this function; callers are assumed to pass
 * a name_len that fits -- confirm at the call sites.
 */
void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
			   u8 *dev_class)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	/* Only include the class of device if it is non-zero. */
	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}
5145 
5146 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
5147 {
5148 	struct mgmt_cp_disconnect *cp = cmd->param;
5149 	struct sock **sk = data;
5150 	struct mgmt_rp_disconnect rp;
5151 
5152 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5153 	rp.addr.type = cp->addr.type;
5154 
5155 	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
5156 		     sizeof(rp));
5157 
5158 	*sk = cmd->sk;
5159 	sock_hold(*sk);
5160 
5161 	mgmt_pending_remove(cmd);
5162 }
5163 
5164 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
5165 {
5166 	struct hci_dev *hdev = data;
5167 	struct mgmt_cp_unpair_device *cp = cmd->param;
5168 	struct mgmt_rp_unpair_device rp;
5169 
5170 	memset(&rp, 0, sizeof(rp));
5171 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5172 	rp.addr.type = cp->addr.type;
5173 
5174 	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
5175 
5176 	cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
5177 
5178 	mgmt_pending_remove(cmd);
5179 }
5180 
/* Emit a Device Disconnected event and complete any pending
 * Disconnect / Unpair Device commands affected by it. Also kicks the
 * deferred power-off when this was the last open connection.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct pending_cmd *power_off;
	struct sock *sk = NULL;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	/* Only connections that were announced to mgmt get a
	 * disconnection event.
	 */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp stores the command's socket in sk (with a
	 * reference) so the event is not echoed back to it.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
5222 
5223 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
5224 			    u8 link_type, u8 addr_type, u8 status)
5225 {
5226 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
5227 	struct mgmt_cp_disconnect *cp;
5228 	struct mgmt_rp_disconnect rp;
5229 	struct pending_cmd *cmd;
5230 
5231 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
5232 			     hdev);
5233 
5234 	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
5235 	if (!cmd)
5236 		return;
5237 
5238 	cp = cmd->param;
5239 
5240 	if (bacmp(bdaddr, &cp->addr.bdaddr))
5241 		return;
5242 
5243 	if (cp->addr.type != bdaddr_type)
5244 		return;
5245 
5246 	bacpy(&rp.addr.bdaddr, bdaddr);
5247 	rp.addr.type = bdaddr_type;
5248 
5249 	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
5250 		     mgmt_status(status), &rp, sizeof(rp));
5251 
5252 	mgmt_pending_remove(cmd);
5253 }
5254 
5255 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5256 			 u8 addr_type, u8 status)
5257 {
5258 	struct mgmt_ev_connect_failed ev;
5259 	struct pending_cmd *power_off;
5260 
5261 	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5262 	if (power_off) {
5263 		struct mgmt_mode *cp = power_off->param;
5264 
5265 		/* The connection is still in hci_conn_hash so test for 1
5266 		 * instead of 0 to know if this is the last one.
5267 		 */
5268 		if (!cp->val && hci_conn_count(hdev) == 1) {
5269 			cancel_delayed_work(&hdev->power_off);
5270 			queue_work(hdev->req_workqueue, &hdev->power_off.work);
5271 		}
5272 	}
5273 
5274 	bacpy(&ev.addr.bdaddr, bdaddr);
5275 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
5276 	ev.status = mgmt_status(status);
5277 
5278 	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
5279 }
5280 
5281 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
5282 {
5283 	struct mgmt_ev_pin_code_request ev;
5284 
5285 	bacpy(&ev.addr.bdaddr, bdaddr);
5286 	ev.addr.type = BDADDR_BREDR;
5287 	ev.secure = secure;
5288 
5289 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
5290 }
5291 
5292 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5293 				  u8 status)
5294 {
5295 	struct pending_cmd *cmd;
5296 	struct mgmt_rp_pin_code_reply rp;
5297 
5298 	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
5299 	if (!cmd)
5300 		return;
5301 
5302 	bacpy(&rp.addr.bdaddr, bdaddr);
5303 	rp.addr.type = BDADDR_BREDR;
5304 
5305 	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
5306 		     mgmt_status(status), &rp, sizeof(rp));
5307 
5308 	mgmt_pending_remove(cmd);
5309 }
5310 
5311 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5312 				      u8 status)
5313 {
5314 	struct pending_cmd *cmd;
5315 	struct mgmt_rp_pin_code_reply rp;
5316 
5317 	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
5318 	if (!cmd)
5319 		return;
5320 
5321 	bacpy(&rp.addr.bdaddr, bdaddr);
5322 	rp.addr.type = BDADDR_BREDR;
5323 
5324 	cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
5325 		     mgmt_status(status), &rp, sizeof(rp));
5326 
5327 	mgmt_pending_remove(cmd);
5328 }
5329 
5330 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5331 			      u8 link_type, u8 addr_type, u32 value,
5332 			      u8 confirm_hint)
5333 {
5334 	struct mgmt_ev_user_confirm_request ev;
5335 
5336 	BT_DBG("%s", hdev->name);
5337 
5338 	bacpy(&ev.addr.bdaddr, bdaddr);
5339 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
5340 	ev.confirm_hint = confirm_hint;
5341 	ev.value = cpu_to_le32(value);
5342 
5343 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
5344 			  NULL);
5345 }
5346 
5347 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5348 			      u8 link_type, u8 addr_type)
5349 {
5350 	struct mgmt_ev_user_passkey_request ev;
5351 
5352 	BT_DBG("%s", hdev->name);
5353 
5354 	bacpy(&ev.addr.bdaddr, bdaddr);
5355 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
5356 
5357 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
5358 			  NULL);
5359 }
5360 
5361 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5362 				      u8 link_type, u8 addr_type, u8 status,
5363 				      u8 opcode)
5364 {
5365 	struct pending_cmd *cmd;
5366 	struct mgmt_rp_user_confirm_reply rp;
5367 	int err;
5368 
5369 	cmd = mgmt_pending_find(opcode, hdev);
5370 	if (!cmd)
5371 		return -ENOENT;
5372 
5373 	bacpy(&rp.addr.bdaddr, bdaddr);
5374 	rp.addr.type = link_to_bdaddr(link_type, addr_type);
5375 	err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
5376 			   &rp, sizeof(rp));
5377 
5378 	mgmt_pending_remove(cmd);
5379 
5380 	return err;
5381 }
5382 
5383 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5384 				     u8 link_type, u8 addr_type, u8 status)
5385 {
5386 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5387 					  status, MGMT_OP_USER_CONFIRM_REPLY);
5388 }
5389 
5390 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5391 					 u8 link_type, u8 addr_type, u8 status)
5392 {
5393 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5394 					  status,
5395 					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
5396 }
5397 
5398 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5399 				     u8 link_type, u8 addr_type, u8 status)
5400 {
5401 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5402 					  status, MGMT_OP_USER_PASSKEY_REPLY);
5403 }
5404 
5405 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5406 					 u8 link_type, u8 addr_type, u8 status)
5407 {
5408 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5409 					  status,
5410 					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
5411 }
5412 
5413 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
5414 			     u8 link_type, u8 addr_type, u32 passkey,
5415 			     u8 entered)
5416 {
5417 	struct mgmt_ev_passkey_notify ev;
5418 
5419 	BT_DBG("%s", hdev->name);
5420 
5421 	bacpy(&ev.addr.bdaddr, bdaddr);
5422 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
5423 	ev.passkey = __cpu_to_le32(passkey);
5424 	ev.entered = entered;
5425 
5426 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
5427 }
5428 
5429 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5430 		      u8 addr_type, u8 status)
5431 {
5432 	struct mgmt_ev_auth_failed ev;
5433 
5434 	bacpy(&ev.addr.bdaddr, bdaddr);
5435 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
5436 	ev.status = mgmt_status(status);
5437 
5438 	mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
5439 }
5440 
/* Completion of a Write Auth Enable command: sync the link security
 * flag with the controller state and answer pending Set Link
 * Security commands.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's HCI_AUTH state into the mgmt flag,
	 * noting whether it actually changed.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* match.sk is presumably referenced by settings_rsp; drop it. */
	if (match.sk)
		sock_put(match.sk);
}
5469 
5470 static void clear_eir(struct hci_request *req)
5471 {
5472 	struct hci_dev *hdev = req->hdev;
5473 	struct hci_cp_write_eir cp;
5474 
5475 	if (!lmp_ext_inq_capable(hdev))
5476 		return;
5477 
5478 	memset(hdev->eir, 0, sizeof(hdev->eir));
5479 
5480 	memset(&cp, 0, sizeof(cp));
5481 
5482 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
5483 }
5484 
/* Completion of a Write SSP Mode command: sync the SSP (and
 * dependent High Speed) flags, answer pending Set SSP commands and
 * refresh the EIR data accordingly.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: roll back the optimistically set
		 * flag (and HS, which depends on SSP) and notify.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling SSP also disables High Speed; changed is
		 * true if either flag was actually cleared.
		 */
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* match.sk is presumably referenced by settings_rsp; drop it. */
	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	/* EIR data is only valid while SSP is enabled. */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		update_eir(&req);
	else
		clear_eir(&req);

	hci_req_run(&req, NULL);
}
5533 
/* Completion of a Secure Connections enable/disable: sync the SC
 * flags and answer pending Set Secure Connections commands.
 */
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: roll back the optimistically set
		 * flags and notify if SC was previously on.
		 */
		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		/* SC-only mode cannot survive SC being disabled. */
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* match.sk is presumably referenced by settings_rsp; drop it. */
	if (match.sk)
		sock_put(match.sk);
}
5570 
5571 static void sk_lookup(struct pending_cmd *cmd, void *data)
5572 {
5573 	struct cmd_lookup *match = data;
5574 
5575 	if (match->sk == NULL) {
5576 		match->sk = cmd->sk;
5577 		sock_hold(match->sk);
5578 	}
5579 }
5580 
5581 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
5582 				    u8 status)
5583 {
5584 	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
5585 
5586 	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
5587 	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
5588 	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
5589 
5590 	if (!status)
5591 		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
5592 			   NULL);
5593 
5594 	if (match.sk)
5595 		sock_put(match.sk);
5596 }
5597 
/* Handler for completion of an HCI local name change.  Sends a
 * MGMT_EV_LOCAL_NAME_CHANGED event unless the change was part of the
 * power-on sequence.  "name" is assumed to be at least
 * HCI_MAX_NAME_LENGTH bytes — TODO confirm against callers.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	/* Nothing to report on failure */
	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No pending mgmt command: the change was initiated by the
		 * kernel itself, so cache the new name on the hdev here.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the initiating socket (if any) when broadcasting */
	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
5624 
/* Handler for completion of an HCI Read Local OOB Data request.
 * Replies to the pending MGMT_OP_READ_LOCAL_OOB_DATA command using
 * either the extended (192-bit + 256-bit) or the legacy (192-bit only)
 * response format.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *randomizer192, u8 *hash256,
				       u8 *randomizer256, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	/* Without a pending command there is nobody to reply to */
	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		/* The extended response is used only when Secure
		 * Connections is enabled and the controller actually
		 * supplied the 256-bit values.
		 */
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
		    hash256 && randomizer256) {
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.randomizer192, randomizer192,
			       sizeof(rp.randomizer192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.randomizer256, randomizer256,
			       sizeof(rp.randomizer256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.randomizer, randomizer192,
			       sizeof(rp.randomizer));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
5671 
/* Report a discovered remote device to mgmt listeners as a
 * MGMT_EV_DEVICE_FOUND event.  Combines the EIR/advertising data, an
 * optionally appended class-of-device field and any scan response data
 * into a single event buffer.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
		       u8 ssp, u8 *eir, u16 eir_len, u8 *scan_rsp,
		       u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	struct smp_irk *irk;
	size_t ev_size;

	/* Results are only of interest during an active discovery */
	if (!hci_discovery_active(hdev))
		return;

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* If the address resolves via a stored IRK, report the address
	 * from the IRK entry instead — presumably the device's identity
	 * address rather than its resolvable private address.
	 */
	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bacpy(&ev->addr.bdaddr, &irk->bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
	} else {
		bacpy(&ev->addr.bdaddr, bdaddr);
		ev->addr.type = link_to_bdaddr(link_type, addr_type);
	}

	ev->rssi = rssi;
	if (cfm_name)
		ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
	if (!ssp)
		ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	/* Append the class of device as an EIR field unless the EIR data
	 * already carries one (5 bytes: length + type + 3 data octets —
	 * matching the headroom reserved in the size check above).
	 */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	/* Scan response data is simply concatenated after the EIR data */
	if (scan_rsp_len > 0)
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
5723 
5724 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5725 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
5726 {
5727 	struct mgmt_ev_device_found *ev;
5728 	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
5729 	u16 eir_len;
5730 
5731 	ev = (struct mgmt_ev_device_found *) buf;
5732 
5733 	memset(buf, 0, sizeof(buf));
5734 
5735 	bacpy(&ev->addr.bdaddr, bdaddr);
5736 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
5737 	ev->rssi = rssi;
5738 
5739 	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
5740 				  name_len);
5741 
5742 	ev->eir_len = cpu_to_le16(eir_len);
5743 
5744 	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
5745 }
5746 
5747 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
5748 {
5749 	struct mgmt_ev_discovering ev;
5750 	struct pending_cmd *cmd;
5751 
5752 	BT_DBG("%s discovering %u", hdev->name, discovering);
5753 
5754 	if (discovering)
5755 		cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
5756 	else
5757 		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5758 
5759 	if (cmd != NULL) {
5760 		u8 type = hdev->discovery.type;
5761 
5762 		cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
5763 			     sizeof(type));
5764 		mgmt_pending_remove(cmd);
5765 	}
5766 
5767 	memset(&ev, 0, sizeof(ev));
5768 	ev.type = hdev->discovery.type;
5769 	ev.discovering = discovering;
5770 
5771 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
5772 }
5773 
5774 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5775 {
5776 	struct pending_cmd *cmd;
5777 	struct mgmt_ev_device_blocked ev;
5778 
5779 	cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
5780 
5781 	bacpy(&ev.addr.bdaddr, bdaddr);
5782 	ev.addr.type = type;
5783 
5784 	return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
5785 			  cmd ? cmd->sk : NULL);
5786 }
5787 
5788 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5789 {
5790 	struct pending_cmd *cmd;
5791 	struct mgmt_ev_device_unblocked ev;
5792 
5793 	cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
5794 
5795 	bacpy(&ev.addr.bdaddr, bdaddr);
5796 	ev.addr.type = type;
5797 
5798 	return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
5799 			  cmd ? cmd->sk : NULL);
5800 }
5801 
5802 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
5803 {
5804 	BT_DBG("%s status %u", hdev->name, status);
5805 
5806 	/* Clear the advertising mgmt setting if we failed to re-enable it */
5807 	if (status) {
5808 		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5809 		new_settings(hdev, NULL);
5810 	}
5811 }
5812 
5813 void mgmt_reenable_advertising(struct hci_dev *hdev)
5814 {
5815 	struct hci_request req;
5816 
5817 	if (hci_conn_num(hdev, LE_LINK) > 0)
5818 		return;
5819 
5820 	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
5821 		return;
5822 
5823 	hci_req_init(&req, hdev);
5824 	enable_advertising(&req);
5825 
5826 	/* If this fails we have no option but to let user space know
5827 	 * that we've disabled advertising.
5828 	 */
5829 	if (hci_req_run(&req, adv_enable_complete) < 0) {
5830 		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5831 		new_settings(hdev, NULL);
5832 	}
5833 }
5834