xref: /linux/net/bluetooth/mgmt.c (revision b2ddeb11738464ce8f75c15384a3b8132cb80357)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 
40 #define MGMT_VERSION	1
41 #define MGMT_REVISION	9
42 
/* Management opcodes available to trusted (privileged) sockets.
 * Reported verbatim by read_commands() in the Read Commands reply.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
};
106 
/* Management events a trusted socket may receive.
 * Reported verbatim by read_commands() in the Read Commands reply.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
};
143 
/* Subset of opcodes allowed for untrusted (unprivileged) sockets:
 * read-only index/information queries.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
};
151 
/* Subset of events delivered to untrusted (unprivileged) sockets:
 * index and non-sensitive state change notifications only.
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
};
164 
165 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
166 
167 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
168 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
169 
170 /* HCI to MGMT error code conversion table */
171 static u8 mgmt_status_table[] = {
172 	MGMT_STATUS_SUCCESS,
173 	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
174 	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
175 	MGMT_STATUS_FAILED,		/* Hardware Failure */
176 	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
177 	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
178 	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
179 	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
180 	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
181 	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
182 	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
183 	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
184 	MGMT_STATUS_BUSY,		/* Command Disallowed */
185 	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
186 	MGMT_STATUS_REJECTED,		/* Rejected Security */
187 	MGMT_STATUS_REJECTED,		/* Rejected Personal */
188 	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
189 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
190 	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
191 	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
192 	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
193 	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
194 	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
195 	MGMT_STATUS_BUSY,		/* Repeated Attempts */
196 	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
197 	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
198 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
199 	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
200 	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
201 	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
202 	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
203 	MGMT_STATUS_FAILED,		/* Unspecified Error */
204 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
205 	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
206 	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
207 	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
208 	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
209 	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
210 	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
211 	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
212 	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
213 	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
214 	MGMT_STATUS_FAILED,		/* Transaction Collision */
215 	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
216 	MGMT_STATUS_REJECTED,		/* QoS Rejected */
217 	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
218 	MGMT_STATUS_REJECTED,		/* Insufficient Security */
219 	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
220 	MGMT_STATUS_BUSY,		/* Role Switch Pending */
221 	MGMT_STATUS_FAILED,		/* Slot Violation */
222 	MGMT_STATUS_FAILED,		/* Role Switch Failed */
223 	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
224 	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
225 	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
226 	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
227 	MGMT_STATUS_BUSY,		/* Controller Busy */
228 	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
229 	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
230 	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
231 	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
232 	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
233 };
234 
235 static u8 mgmt_status(u8 hci_status)
236 {
237 	if (hci_status < ARRAY_SIZE(mgmt_status_table))
238 		return mgmt_status_table[hci_status];
239 
240 	return MGMT_STATUS_FAILED;
241 }
242 
243 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
244 			    u16 len, int flag)
245 {
246 	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
247 			       flag, NULL);
248 }
249 
250 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
251 			      u16 len, int flag, struct sock *skip_sk)
252 {
253 	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
254 			       flag, skip_sk);
255 }
256 
257 static int mgmt_generic_event(u16 event, struct hci_dev *hdev, void *data,
258 			      u16 len, struct sock *skip_sk)
259 {
260 	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
261 			       HCI_MGMT_GENERIC_EVENTS, skip_sk);
262 }
263 
264 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
265 		      struct sock *skip_sk)
266 {
267 	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
268 			       HCI_SOCK_TRUSTED, skip_sk);
269 }
270 
271 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
272 			u16 data_len)
273 {
274 	struct mgmt_rp_read_version rp;
275 
276 	BT_DBG("sock %p", sk);
277 
278 	rp.version = MGMT_VERSION;
279 	rp.revision = cpu_to_le16(MGMT_REVISION);
280 
281 	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
282 				 &rp, sizeof(rp));
283 }
284 
285 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
286 			 u16 data_len)
287 {
288 	struct mgmt_rp_read_commands *rp;
289 	u16 num_commands, num_events;
290 	size_t rp_size;
291 	int i, err;
292 
293 	BT_DBG("sock %p", sk);
294 
295 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
296 		num_commands = ARRAY_SIZE(mgmt_commands);
297 		num_events = ARRAY_SIZE(mgmt_events);
298 	} else {
299 		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
300 		num_events = ARRAY_SIZE(mgmt_untrusted_events);
301 	}
302 
303 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
304 
305 	rp = kmalloc(rp_size, GFP_KERNEL);
306 	if (!rp)
307 		return -ENOMEM;
308 
309 	rp->num_commands = cpu_to_le16(num_commands);
310 	rp->num_events = cpu_to_le16(num_events);
311 
312 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
313 		__le16 *opcode = rp->opcodes;
314 
315 		for (i = 0; i < num_commands; i++, opcode++)
316 			put_unaligned_le16(mgmt_commands[i], opcode);
317 
318 		for (i = 0; i < num_events; i++, opcode++)
319 			put_unaligned_le16(mgmt_events[i], opcode);
320 	} else {
321 		__le16 *opcode = rp->opcodes;
322 
323 		for (i = 0; i < num_commands; i++, opcode++)
324 			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
325 
326 		for (i = 0; i < num_events; i++, opcode++)
327 			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
328 	}
329 
330 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
331 				rp, rp_size);
332 	kfree(rp);
333 
334 	return err;
335 }
336 
/* Handle MGMT_OP_READ_INDEX_LIST: report the indexes of all configured
 * BR/EDR controllers. Devices in setup/config, user-channel devices and
 * raw-only devices are excluded.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the entry count, used to size the
	 * reply buffer. The list cannot change while the lock is held.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC because we allocate under hci_dev_list_lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in indexes with the full filter applied; this
	 * can only yield fewer entries than counted above.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the actual number of entries */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
396 
/* Handle MGMT_OP_READ_UNCONF_INDEX_LIST: report the indexes of all
 * unconfigured BR/EDR controllers. Mirrors read_index_list() with the
 * HCI_UNCONFIGURED test inverted.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the entry count for sizing the reply */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC because we allocate under hci_dev_list_lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill indexes with the full filter applied */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the actual number of entries */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
456 
/* Handle MGMT_OP_READ_EXT_INDEX_LIST: report all controllers with a
 * per-entry type (0x00 configured BR/EDR, 0x01 unconfigured BR/EDR,
 * 0x02 AMP) and bus information. Calling this switches the socket to
 * extended index events only.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the entry count for sizing the reply */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR || d->dev_type == HCI_AMP)
			count++;
	}

	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
	/* GFP_ATOMIC because we allocate under hci_dev_list_lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill entries with the full filter applied */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		/* Entry type encoding: 0x00 configured BR/EDR,
		 * 0x01 unconfigured BR/EDR, 0x02 AMP
		 */
		if (d->dev_type == HCI_BREDR) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		BT_DBG("Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the actual number of entries */
	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
532 
533 static bool is_configured(struct hci_dev *hdev)
534 {
535 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
536 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
537 		return false;
538 
539 	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
540 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
541 		return false;
542 
543 	return true;
544 }
545 
546 static __le32 get_missing_options(struct hci_dev *hdev)
547 {
548 	u32 options = 0;
549 
550 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
551 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
552 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
553 
554 	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
555 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
556 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
557 
558 	return cpu_to_le32(options);
559 }
560 
561 static int new_options(struct hci_dev *hdev, struct sock *skip)
562 {
563 	__le32 options = get_missing_options(hdev);
564 
565 	return mgmt_generic_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
566 				  sizeof(options), skip);
567 }
568 
569 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
570 {
571 	__le32 options = get_missing_options(hdev);
572 
573 	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
574 				 sizeof(options));
575 }
576 
/* Handle MGMT_OP_READ_CONFIG_INFO: report the manufacturer plus which
 * configuration options are supported and which are still missing.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	/* External configuration is supported when the quirk says so */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* A public address can only be set with a driver set_bdaddr hook */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
604 
/* Compute the bitmask of MGMT settings this controller can support,
 * derived from its LMP features and quirks.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Settings supported by every controller */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	/* BR/EDR dependent settings */
	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;
	}

	/* LE dependent settings */
	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	/* Configuration needs either external config support or a driver
	 * hook for setting the public address.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	return settings;
}
644 
/* Compute the bitmask of MGMT settings currently active on the
 * controller, from the power state and the device flags.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	return settings;
}
712 
713 #define PNP_INFO_SVCLASS_ID		0x1200
714 
/* Append an EIR "16-bit Service Class UUIDs" field for hdev's UUID list
 * to @data, using at most @len bytes. UUIDs below 0x1100 and the PnP
 * Information service are skipped. Returns a pointer just past the
 * written bytes (== @data if nothing was written).
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for at least length, type and one 16-bit UUID */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* The 16-bit form lives at bytes 12-13 of the 128-bit UUID */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		/* Lazily open the field on the first matching UUID */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;	/* length: type byte only, so far */
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);	/* grow the length byte */
	}

	return ptr;
}
756 
/* Append an EIR "32-bit Service Class UUIDs" field for hdev's UUID list
 * to @data, using at most @len bytes. Returns a pointer just past the
 * written bytes (== @data if nothing was written).
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for at least length, type and one 32-bit UUID */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		/* Lazily open the field on the first matching UUID */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;	/* length: type byte only, so far */
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* The 32-bit form lives at bytes 12-15 of the 128-bit UUID */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
789 
/* Append an EIR "128-bit Service Class UUIDs" field for hdev's UUID
 * list to @data, using at most @len bytes. Returns a pointer just past
 * the written bytes (== @data if nothing was written).
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for at least length, type and one 128-bit UUID */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		/* Lazily open the field on the first matching UUID */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;	/* length: type byte only, so far */
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
822 
823 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
824 {
825 	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
826 }
827 
828 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
829 						  struct hci_dev *hdev,
830 						  const void *data)
831 {
832 	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
833 }
834 
/* Write the local device name (complete, or shortened when it does not
 * fit) into @ptr as a scan response AD field. Returns the number of
 * bytes written (0 when no name is set).
 */
static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* Reserve two bytes for the length and type fields */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* AD field length covers the type byte plus the name */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		/* NOTE(review): ptr is not read after this point; the
		 * advance appears to anticipate appending further fields.
		 */
		ptr += (name_len + 2);
	}

	return ad_len;
}
860 
/* Copy the advertising instance's scan response data into @ptr and
 * return its length.
 */
static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	/* TODO: Set the appropriate entries based on advertising instance flags
	 * here once flags other than 0 are supported.
	 */
	memcpy(ptr, hdev->adv_instance.scan_rsp_data,
	       hdev->adv_instance.scan_rsp_len);

	return hdev->adv_instance.scan_rsp_len;
}
871 
/* Queue an HCI Set Scan Response Data command for the given advertising
 * instance (0 = default/global data). Does nothing when LE is disabled
 * or the data is unchanged.
 */
static void update_scan_rsp_data_for_instance(struct hci_request *req,
					      u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	if (instance)
		len = create_instance_scan_rsp_data(hdev, cp.data);
	else
		len = create_default_scan_rsp_data(hdev, cp.data);

	/* There's nothing to do if the data hasn't changed */
	if (hdev->scan_rsp_data_len == len &&
	    !memcmp(cp.data, hdev->scan_rsp_data, len))
		return;

	/* Copy the full buffer: cp.data was zeroed above, so this also
	 * clears any stale bytes beyond len in the cached copy.
	 */
	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
900 
901 static void update_scan_rsp_data(struct hci_request *req)
902 {
903 	struct hci_dev *hdev = req->hdev;
904 	u8 instance;
905 
906 	/* The "Set Advertising" setting supersedes the "Add Advertising"
907 	 * setting. Here we set the scan response data based on which
908 	 * setting was set. When neither apply, default to the global settings,
909 	 * represented by instance "0".
910 	 */
911 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
912 	    !hci_dev_test_flag(hdev, HCI_ADVERTISING))
913 		instance = 0x01;
914 	else
915 		instance = 0x00;
916 
917 	update_scan_rsp_data_for_instance(req, instance);
918 }
919 
920 static u8 get_adv_discov_flags(struct hci_dev *hdev)
921 {
922 	struct mgmt_pending_cmd *cmd;
923 
924 	/* If there's a pending mgmt command the flags will not yet have
925 	 * their final values, so check for this first.
926 	 */
927 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
928 	if (cmd) {
929 		struct mgmt_mode *cp = cmd->param;
930 		if (cp->val == 0x01)
931 			return LE_AD_GENERAL;
932 		else if (cp->val == 0x02)
933 			return LE_AD_LIMITED;
934 	} else {
935 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
936 			return LE_AD_LIMITED;
937 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
938 			return LE_AD_GENERAL;
939 	}
940 
941 	return 0;
942 }
943 
944 static u8 get_current_adv_instance(struct hci_dev *hdev)
945 {
946 	/* The "Set Advertising" setting supersedes the "Add Advertising"
947 	 * setting. Here we set the advertising data based on which
948 	 * setting was set. When neither apply, default to the global settings,
949 	 * represented by instance "0".
950 	 */
951 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
952 	    !hci_dev_test_flag(hdev, HCI_ADVERTISING))
953 		return 0x01;
954 
955 	return 0x00;
956 }
957 
958 static bool get_connectable(struct hci_dev *hdev)
959 {
960 	struct mgmt_pending_cmd *cmd;
961 
962 	/* If there's a pending mgmt command the flag will not yet have
963 	 * it's final value, so check for this first.
964 	 */
965 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
966 	if (cmd) {
967 		struct mgmt_mode *cp = cmd->param;
968 
969 		return cp->val;
970 	}
971 
972 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
973 }
974 
975 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
976 {
977 	u32 flags;
978 
979 	if (instance > 0x01)
980 		return 0;
981 
982 	if (instance == 0x01)
983 		return hdev->adv_instance.flags;
984 
985 	/* Instance 0 always manages the "Tx Power" and "Flags" fields */
986 	flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
987 
988 	/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting corresponds
989 	 * to the "connectable" instance flag.
990 	 */
991 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
992 		flags |= MGMT_ADV_FLAG_CONNECTABLE;
993 
994 	return flags;
995 }
996 
997 static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
998 {
999 	/* Ignore instance 0 and other unsupported instances */
1000 	if (instance != 0x01)
1001 		return 0;
1002 
1003 	/* TODO: Take into account the "appearance" and "local-name" flags here.
1004 	 * These are currently being ignored as they are not supported.
1005 	 */
1006 	return hdev->adv_instance.scan_rsp_len;
1007 }
1008 
/* Build the advertising data for @instance into @ptr: optional "Flags"
 * field, optional "Tx Power" field, then the instance's own data (for
 * instance != 0). Returns the total number of bytes written.
 */
static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	u32 instance_flags = get_adv_instance_flags(hdev, instance);

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field".
		 */
		if (flags) {
			ptr[0] = 0x02;		/* field length */
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

	/* Provide Tx Power only if we can provide a valid value for it */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
	    (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
		ptr[0] = 0x02;		/* field length */
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8)hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	/* Instance-specific data comes after the managed fields */
	if (instance) {
		memcpy(ptr, hdev->adv_instance.adv_data,
		       hdev->adv_instance.adv_data_len);
		ad_len += hdev->adv_instance.adv_data_len;
	}

	return ad_len;
}
1065 
/* Queue an LE Set Advertising Data command for the given instance,
 * unless LE is disabled or the newly built data is identical to what
 * was last programmed. Note that hdev->adv_data is updated here, i.e.
 * before the queued HCI command has actually completed.
 */
static void update_adv_data_for_instance(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_instance_adv_data(hdev, instance, cp.data);

	/* There's nothing to do if the data hasn't changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	/* Cache the full (zero-padded) buffer for future comparisons */
	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
1091 
1092 static void update_adv_data(struct hci_request *req)
1093 {
1094 	struct hci_dev *hdev = req->hdev;
1095 	u8 instance = get_current_adv_instance(hdev);
1096 
1097 	update_adv_data_for_instance(req, instance);
1098 }
1099 
1100 int mgmt_update_adv_data(struct hci_dev *hdev)
1101 {
1102 	struct hci_request req;
1103 
1104 	hci_req_init(&req, hdev);
1105 	update_adv_data(&req);
1106 
1107 	return hci_req_run(&req, NULL);
1108 }
1109 
/* Assemble the Extended Inquiry Response payload into data: local name
 * (complete or shortened), inquiry Tx power, Device ID, and finally the
 * lists of registered 16/32/128-bit service UUIDs.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			/* Names longer than 48 bytes are truncated and
			 * tagged as shortened rather than complete.
			 */
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		/* Device ID structure: type octet plus four LE 16-bit values */
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* Fill whatever space remains with the service UUID lists */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
1157 
1158 static void update_eir(struct hci_request *req)
1159 {
1160 	struct hci_dev *hdev = req->hdev;
1161 	struct hci_cp_write_eir cp;
1162 
1163 	if (!hdev_is_powered(hdev))
1164 		return;
1165 
1166 	if (!lmp_ext_inq_capable(hdev))
1167 		return;
1168 
1169 	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1170 		return;
1171 
1172 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1173 		return;
1174 
1175 	memset(&cp, 0, sizeof(cp));
1176 
1177 	create_eir(hdev, cp.data);
1178 
1179 	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
1180 		return;
1181 
1182 	memcpy(hdev->eir, cp.data, sizeof(cp.data));
1183 
1184 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1185 }
1186 
1187 static u8 get_service_classes(struct hci_dev *hdev)
1188 {
1189 	struct bt_uuid *uuid;
1190 	u8 val = 0;
1191 
1192 	list_for_each_entry(uuid, &hdev->uuids, list)
1193 		val |= uuid->svc_hint;
1194 
1195 	return val;
1196 }
1197 
1198 static void update_class(struct hci_request *req)
1199 {
1200 	struct hci_dev *hdev = req->hdev;
1201 	u8 cod[3];
1202 
1203 	BT_DBG("%s", hdev->name);
1204 
1205 	if (!hdev_is_powered(hdev))
1206 		return;
1207 
1208 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1209 		return;
1210 
1211 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1212 		return;
1213 
1214 	cod[0] = hdev->minor_class;
1215 	cod[1] = hdev->major_class;
1216 	cod[2] = get_service_classes(hdev);
1217 
1218 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1219 		cod[1] |= 0x20;
1220 
1221 	if (memcmp(cod, hdev->dev_class, 3) == 0)
1222 		return;
1223 
1224 	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1225 }
1226 
1227 static void disable_advertising(struct hci_request *req)
1228 {
1229 	u8 enable = 0x00;
1230 
1231 	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1232 }
1233 
/* Queue the HCI commands that (re)enable LE advertising, deriving the
 * advertising type (ADV_IND / ADV_SCAN_IND / ADV_NONCONN_IND) and own
 * address type from the current instance flags and the connectable
 * setting. Does nothing while an LE connection exists.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u8 instance;
	u32 flags;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* If advertising is already active, disable it first so the new
	 * parameters take effect.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	instance = get_current_adv_instance(hdev);
	flags = get_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

	/* Use scannable advertising when scan response data is present */
	if (connectable)
		cp.type = LE_ADV_IND;
	else if (get_adv_instance_scan_rsp_len(hdev, instance))
		cp.type = LE_ADV_SCAN_IND;
	else
		cp.type = LE_ADV_NONCONN_IND;

	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
1290 
/* Delayed work that expires the service cache: once the HCI_SERVICE_CACHE
 * flag is cleared, push any pending EIR and Class of Device updates to
 * the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	/* Build the request under the device lock, run it outside */
	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
1311 
/* Delayed work run when the Resolvable Private Address times out. Marks
 * the RPA as expired and, if advertising is enabled, re-enables it so a
 * fresh RPA gets generated and programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */
	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, NULL);
}
1332 
/* One-time per-controller mgmt initialization, performed on the first
 * mgmt access (guarded by the HCI_MGMT flag).
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
1348 
/* Handle the Read Controller Information mgmt command: report address,
 * HCI version, manufacturer, supported/current settings, class of
 * device and the device names.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	/* Take the device lock so the reply is a consistent snapshot */
	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1378 
1379 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1380 {
1381 	__le32 settings = cpu_to_le32(get_current_settings(hdev));
1382 
1383 	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1384 				 sizeof(settings));
1385 }
1386 
1387 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1388 {
1389 	BT_DBG("%s status 0x%02x", hdev->name, status);
1390 
1391 	if (hci_conn_count(hdev) == 0) {
1392 		cancel_delayed_work(&hdev->power_off);
1393 		queue_work(hdev->req_workqueue, &hdev->power_off.work);
1394 	}
1395 }
1396 
/* Queue the HCI commands needed to stop the current discovery activity
 * (inquiry, LE scanning or remote name resolution) based on the
 * discovery state machine. Returns true when at least one stop action
 * was queued or initiated.
 */
static bool hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		return true;

	case DISCOVERY_RESOLVING:
		/* Cancel any in-progress remote name request */
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			break;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		return true;

	default:
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req);
			return true;
		}

		break;
	}

	return false;
}
1439 
1440 static void advertising_added(struct sock *sk, struct hci_dev *hdev,
1441 			      u8 instance)
1442 {
1443 	struct mgmt_ev_advertising_added ev;
1444 
1445 	ev.instance = instance;
1446 
1447 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1448 }
1449 
1450 static void advertising_removed(struct sock *sk, struct hci_dev *hdev,
1451 				u8 instance)
1452 {
1453 	struct mgmt_ev_advertising_removed ev;
1454 
1455 	ev.instance = instance;
1456 
1457 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1458 }
1459 
/* Remove the single supported advertising instance: cancel its timeout,
 * wipe its state and emit Advertising Removed. Advertising itself is
 * only disabled when the device is powered and the global advertising
 * setting is not keeping it alive.
 */
static void clear_adv_instance(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		return;

	if (hdev->adv_instance.timeout)
		cancel_delayed_work(&hdev->adv_instance.timeout_exp);

	memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
	advertising_removed(NULL, hdev, 1);
	hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);

	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_req_init(&req, hdev);
	disable_advertising(&req);
	hci_req_run(&req, NULL);
}
1482 
/* Queue all HCI commands needed before powering off: disable page and
 * inquiry scan, clear the advertising instance, disable advertising,
 * stop discovery and disconnect/cancel/reject every connection
 * depending on its state. Returns the hci_req_run() result (-ENODATA
 * when no commands were queued; see set_powered()).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (hdev->adv_instance.timeout)
		clear_adv_instance(hdev);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		disable_advertising(&req);

	discov_stopped = hci_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			/* Established links are disconnected gracefully */
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			/* Outgoing connection attempts are cancelled */
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			/* Incoming connection requests are rejected */
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1544 
/* Handle the Set Powered mgmt command: power the controller up via the
 * power_on work, or cleanly shut it down (disconnects, scans and
 * advertising off) before queueing the delayed power_off work.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* If auto-off was pending, cancel it; powering on then only
	 * needs the mgmt_powered() notification.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Requested state already matches: just send back the settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1610 
1611 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1612 {
1613 	__le32 ev = cpu_to_le32(get_current_settings(hdev));
1614 
1615 	return mgmt_generic_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1616 				  sizeof(ev), skip);
1617 }
1618 
/* Broadcast the current settings to all mgmt sockets */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1623 
/* Context shared by mgmt_pending_foreach() callbacks such as
 * settings_rsp() below.
 */
struct cmd_lookup {
	struct sock *sk;	/* first responder's socket (sock_hold'd) */
	struct hci_dev *hdev;
	u8 mgmt_status;		/* NOTE(review): not referenced in this chunk */
};
1629 
/* mgmt_pending_foreach() callback: send a settings response for the
 * pending command, detach it from the pending list and free it. The
 * first command's socket is saved in match->sk with an extra reference
 * taken via sock_hold().
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	/* list_del() was already done above, so only free here */
	mgmt_pending_free(cmd);
}
1645 
1646 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1647 {
1648 	u8 *status = data;
1649 
1650 	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1651 	mgmt_pending_remove(cmd);
1652 }
1653 
1654 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1655 {
1656 	if (cmd->cmd_complete) {
1657 		u8 *status = data;
1658 
1659 		cmd->cmd_complete(cmd, *status);
1660 		mgmt_pending_remove(cmd);
1661 
1662 		return;
1663 	}
1664 
1665 	cmd_status_rsp(cmd, data);
1666 }
1667 
/* Generic cmd_complete handler: echo the original command parameters
 * back to the caller with the given status.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1673 
/* cmd_complete handler for address-based commands: respond with only
 * the leading mgmt_addr_info portion of the stored parameters.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1679 
1680 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1681 {
1682 	if (!lmp_bredr_capable(hdev))
1683 		return MGMT_STATUS_NOT_SUPPORTED;
1684 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1685 		return MGMT_STATUS_REJECTED;
1686 	else
1687 		return MGMT_STATUS_SUCCESS;
1688 }
1689 
1690 static u8 mgmt_le_support(struct hci_dev *hdev)
1691 {
1692 	if (!lmp_le_capable(hdev))
1693 		return MGMT_STATUS_NOT_SUPPORTED;
1694 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1695 		return MGMT_STATUS_REJECTED;
1696 	else
1697 		return MGMT_STATUS_SUCCESS;
1698 }
1699 
1700 static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
1701 				      u16 opcode)
1702 {
1703 	struct mgmt_pending_cmd *cmd;
1704 	struct mgmt_mode *cp;
1705 	struct hci_request req;
1706 	bool changed;
1707 
1708 	BT_DBG("status 0x%02x", status);
1709 
1710 	hci_dev_lock(hdev);
1711 
1712 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1713 	if (!cmd)
1714 		goto unlock;
1715 
1716 	if (status) {
1717 		u8 mgmt_err = mgmt_status(status);
1718 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1719 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1720 		goto remove_cmd;
1721 	}
1722 
1723 	cp = cmd->param;
1724 	if (cp->val) {
1725 		changed = !hci_dev_test_and_set_flag(hdev, HCI_DISCOVERABLE);
1726 
1727 		if (hdev->discov_timeout > 0) {
1728 			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1729 			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1730 					   to);
1731 		}
1732 	} else {
1733 		changed = hci_dev_test_and_clear_flag(hdev, HCI_DISCOVERABLE);
1734 	}
1735 
1736 	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1737 
1738 	if (changed)
1739 		new_settings(hdev, cmd->sk);
1740 
1741 	/* When the discoverable mode gets changed, make sure
1742 	 * that class of device has the limited discoverable
1743 	 * bit correctly set. Also update page scan based on whitelist
1744 	 * entries.
1745 	 */
1746 	hci_req_init(&req, hdev);
1747 	__hci_update_page_scan(&req);
1748 	update_class(&req);
1749 	hci_req_run(&req, NULL);
1750 
1751 remove_cmd:
1752 	mgmt_pending_remove(cmd);
1753 
1754 unlock:
1755 	hci_dev_unlock(hdev);
1756 }
1757 
/* Handle the Set Discoverable mgmt command. val may be 0x00 (off),
 * 0x01 (general) or 0x02 (limited, which requires a timeout). For a
 * powered BR/EDR controller this programs the IAC and scan enable; for
 * LE-only controllers only the advertising data is refreshed. The
 * timeout is armed in set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled first */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1922 
1923 static void write_fast_connectable(struct hci_request *req, bool enable)
1924 {
1925 	struct hci_dev *hdev = req->hdev;
1926 	struct hci_cp_write_page_scan_activity acp;
1927 	u8 type;
1928 
1929 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1930 		return;
1931 
1932 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1933 		return;
1934 
1935 	if (enable) {
1936 		type = PAGE_SCAN_TYPE_INTERLACED;
1937 
1938 		/* 160 msec page scan interval */
1939 		acp.interval = cpu_to_le16(0x0100);
1940 	} else {
1941 		type = PAGE_SCAN_TYPE_STANDARD;	/* default */
1942 
1943 		/* default 1.28 sec page scan */
1944 		acp.interval = cpu_to_le16(0x0800);
1945 	}
1946 
1947 	acp.window = cpu_to_le16(0x0012);
1948 
1949 	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1950 	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
1951 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1952 			    sizeof(acp), &acp);
1953 
1954 	if (hdev->page_scan_type != type)
1955 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1956 }
1957 
1958 static void set_connectable_complete(struct hci_dev *hdev, u8 status,
1959 				     u16 opcode)
1960 {
1961 	struct mgmt_pending_cmd *cmd;
1962 	struct mgmt_mode *cp;
1963 	bool conn_changed, discov_changed;
1964 
1965 	BT_DBG("status 0x%02x", status);
1966 
1967 	hci_dev_lock(hdev);
1968 
1969 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1970 	if (!cmd)
1971 		goto unlock;
1972 
1973 	if (status) {
1974 		u8 mgmt_err = mgmt_status(status);
1975 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1976 		goto remove_cmd;
1977 	}
1978 
1979 	cp = cmd->param;
1980 	if (cp->val) {
1981 		conn_changed = !hci_dev_test_and_set_flag(hdev,
1982 							  HCI_CONNECTABLE);
1983 		discov_changed = false;
1984 	} else {
1985 		conn_changed = hci_dev_test_and_clear_flag(hdev,
1986 							   HCI_CONNECTABLE);
1987 		discov_changed = hci_dev_test_and_clear_flag(hdev,
1988 							     HCI_DISCOVERABLE);
1989 	}
1990 
1991 	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1992 
1993 	if (conn_changed || discov_changed) {
1994 		new_settings(hdev, cmd->sk);
1995 		hci_update_page_scan(hdev);
1996 		if (discov_changed)
1997 			mgmt_update_adv_data(hdev);
1998 		hci_update_background_scan(hdev);
1999 	}
2000 
2001 remove_cmd:
2002 	mgmt_pending_remove(cmd);
2003 
2004 unlock:
2005 	hci_dev_unlock(hdev);
2006 }
2007 
2008 static int set_connectable_update_settings(struct hci_dev *hdev,
2009 					   struct sock *sk, u8 val)
2010 {
2011 	bool changed = false;
2012 	int err;
2013 
2014 	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
2015 		changed = true;
2016 
2017 	if (val) {
2018 		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
2019 	} else {
2020 		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
2021 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2022 	}
2023 
2024 	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
2025 	if (err < 0)
2026 		return err;
2027 
2028 	if (changed) {
2029 		hci_update_page_scan(hdev);
2030 		hci_update_background_scan(hdev);
2031 		return new_settings(hdev, sk);
2032 	}
2033 
2034 	return 0;
2035 }
2036 
/* Handle the Set Connectable mgmt command. On a powered controller
 * this programs page scan (BR/EDR) or refreshes the advertising data
 * (LE-only) and re-enables advertising with updated parameters; the
 * flag updates happen in set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only the stored settings need updating */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			/* If we don't have any whitelist entries just
			 * disable all scanning. If there are entries
			 * and we had both page and inquiry scanning
			 * enabled then fall back to only page scanning.
			 * Otherwise no changes are needed.
			 */
			if (list_empty(&hdev->whitelist))
				scan = SCAN_DISABLED;
			else if (test_bit(HCI_ISCAN, &hdev->flags))
				scan = SCAN_PAGE;
			else
				goto no_scan_update;

			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

no_scan_update:
	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		enable_advertising(&req);

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* An empty request still needs the settings updated */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2133 
2134 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
2135 			u16 len)
2136 {
2137 	struct mgmt_mode *cp = data;
2138 	bool changed;
2139 	int err;
2140 
2141 	BT_DBG("request for %s", hdev->name);
2142 
2143 	if (cp->val != 0x00 && cp->val != 0x01)
2144 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
2145 				       MGMT_STATUS_INVALID_PARAMS);
2146 
2147 	hci_dev_lock(hdev);
2148 
2149 	if (cp->val)
2150 		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
2151 	else
2152 		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
2153 
2154 	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
2155 	if (err < 0)
2156 		goto unlock;
2157 
2158 	if (changed)
2159 		err = new_settings(hdev, sk);
2160 
2161 unlock:
2162 	hci_dev_unlock(hdev);
2163 	return err;
2164 }
2165 
/* Handle the Set Link Security mgmt command.
 *
 * When the controller is powered off only the HCI_LINK_SECURITY flag
 * is toggled; otherwise HCI_OP_WRITE_AUTH_ENABLE is sent and the mgmt
 * response is produced when that command completes (via the pending
 * command entry added here).
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Link level security requires BR/EDR support */
	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* While powered off only the flag is updated; no HCI
		 * traffic is generated.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* Only one Set Link Security command may be pending at a time */
	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* No HCI command needed if the controller state already matches */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2234 
/* Handle the Set Secure Simple Pairing mgmt command.
 *
 * When powered off only the HCI_SSP_ENABLED flag is updated (with
 * HCI_HS_ENABLED cleared on disable, since High Speed depends on
 * SSP). When powered, HCI_OP_WRITE_SSP_MODE is sent and completion is
 * handled asynchronously via the pending command added here.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* SSP requires BR/EDR support and an SSP-capable controller */
	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* Disabling SSP also clears HS; report a change
			 * if either flag was previously set.
			 */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* Only one Set SSP command may be pending at a time */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Nothing to do if the requested mode is already active */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Disabling SSP also turns off SSP debug mode if it was in use;
	 * cp->val is 0x00 here, which is the payload that disables it.
	 */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2315 
/* Handle the Set High Speed mgmt command.
 *
 * High Speed requires SSP to be enabled. Only the HCI_HS_ENABLED flag
 * is changed here; no HCI commands are sent.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* HS cannot be used unless SSP is enabled */
	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A pending Set SSP command could change the SSP state that
	 * this command depends on, so refuse to race with it.
	 */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		/* Disabling HS is only allowed while powered off */
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2372 
/* Request callback for the Set LE command's HCI transaction.
 *
 * Responds to all pending Set LE commands and, on success, emits a
 * New Settings event and refreshes advertising data and background
 * scanning to reflect the new LE state.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set LE command with the HCI error */
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp stored a socket reference in match.sk; drop it */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		__hci_update_background_scan(&req);
		hci_req_run(&req, NULL);
	}

unlock:
	hci_dev_unlock(hdev);
}
2412 
/* Handle the Set Low Energy mgmt command.
 *
 * Controllers with BR/EDR disabled cannot have LE switched off;
 * enabling already-enabled LE on them is answered positively. When
 * powered off, or when the host LE state already matches, only the
 * HCI_LE_ENABLED flag is updated. Otherwise an
 * HCI_OP_WRITE_LE_HOST_SUPPORTED request is issued and completion is
 * handled in le_enable_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Advertising cannot stay enabled without LE */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Refuse to race with other commands that alter the LE or
	 * advertising state.
	 */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop advertising before turning off LE host support */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2513 
2514 /* This is a helper function to test for pending mgmt commands that can
2515  * cause CoD or EIR HCI commands. We can only allow one such pending
2516  * mgmt command at a time since otherwise we cannot easily track what
2517  * the current values are, will be, and based on that calculate if a new
2518  * HCI command needs to be sent and if yes with what value.
2519  */
2520 static bool pending_eir_or_class(struct hci_dev *hdev)
2521 {
2522 	struct mgmt_pending_cmd *cmd;
2523 
2524 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2525 		switch (cmd->opcode) {
2526 		case MGMT_OP_ADD_UUID:
2527 		case MGMT_OP_REMOVE_UUID:
2528 		case MGMT_OP_SET_DEV_CLASS:
2529 		case MGMT_OP_SET_POWERED:
2530 			return true;
2531 		}
2532 	}
2533 
2534 	return false;
2535 }
2536 
/* Bluetooth Base UUID stored in little-endian byte order; the first
 * 12 bytes are compared against incoming UUIDs to detect 16/32-bit
 * short forms (see get_uuid_size()).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2541 
2542 static u8 get_uuid_size(const u8 *uuid)
2543 {
2544 	u32 val;
2545 
2546 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2547 		return 128;
2548 
2549 	val = get_unaligned_le32(&uuid[12]);
2550 	if (val > 0xffff)
2551 		return 32;
2552 
2553 	return 16;
2554 }
2555 
2556 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2557 {
2558 	struct mgmt_pending_cmd *cmd;
2559 
2560 	hci_dev_lock(hdev);
2561 
2562 	cmd = pending_find(mgmt_op, hdev);
2563 	if (!cmd)
2564 		goto unlock;
2565 
2566 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2567 			  mgmt_status(status), hdev->dev_class, 3);
2568 
2569 	mgmt_pending_remove(cmd);
2570 
2571 unlock:
2572 	hci_dev_unlock(hdev);
2573 }
2574 
/* Request callback for add_uuid(): forward the status to the shared
 * class/EIR completion handler.
 */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2581 
/* Handle the Add UUID mgmt command.
 *
 * Appends the UUID to hdev->uuids and refreshes the class of device
 * and EIR data. If the HCI request turns out to be empty (-ENODATA),
 * respond immediately with the current device class; otherwise the
 * response comes from add_uuid_complete().
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* Empty request: no HCI traffic needed, respond now */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2639 
2640 static bool enable_service_cache(struct hci_dev *hdev)
2641 {
2642 	if (!hdev_is_powered(hdev))
2643 		return false;
2644 
2645 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2646 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2647 				   CACHE_TIMEOUT);
2648 		return true;
2649 	}
2650 
2651 	return false;
2652 }
2653 
/* Request callback for remove_uuid(): forward the status to the shared
 * class/EIR completion handler.
 */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2660 
/* Handle the Remove UUID mgmt command.
 *
 * An all-zero UUID acts as a wildcard that clears the whole list (and
 * may just arm the service cache instead of updating the controller
 * right away); otherwise every matching entry is removed. The class
 * of device and EIR data are refreshed afterwards.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache got armed, the controller update
		 * is deferred and the command completes immediately.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* Empty request: no HCI traffic needed, respond now */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2739 
/* Request callback for set_dev_class(): forward the status to the
 * shared class/EIR completion handler.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2746 
/* Handle the Set Device Class mgmt command.
 *
 * Stores the new major/minor class and, when powered, pushes the
 * updated class (and possibly EIR data) to the controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Reject values with the low two minor bits or high three major
	 * bits set; those bits must be zero here.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* While powered off only the stored values are updated */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* Drop the lock around the synchronous cancel.
		 * NOTE(review): presumably the work item itself takes
		 * the hdev lock, which would deadlock here - confirm.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* Empty request: no HCI traffic needed, respond now */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2817 
/* Handle the Load Link Keys mgmt command.
 *
 * Validates the variable-length key list, replaces all stored BR/EDR
 * link keys and updates the HCI_KEEP_DEBUG_KEYS policy. Debug
 * combination keys in the list are skipped rather than stored.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Largest key_count whose expected_len still fits in a u16;
	 * guards the length computation below against overflow.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared key count exactly */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every entry before clearing the stored keys */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2899 
2900 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2901 			   u8 addr_type, struct sock *skip_sk)
2902 {
2903 	struct mgmt_ev_device_unpaired ev;
2904 
2905 	bacpy(&ev.addr.bdaddr, bdaddr);
2906 	ev.addr.type = addr_type;
2907 
2908 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2909 			  skip_sk);
2910 }
2911 
/* Handle the Unpair Device mgmt command.
 *
 * Removes the stored keys for the given address (link key for BR/EDR;
 * IRK and LTK for LE) and, if requested and the device is connected,
 * terminates the link. The mgmt response is either sent immediately
 * (no link to tear down) or when the disconnect completes.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	/* The reply always echoes the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	} else {
		u8 addr_type;

		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
					       &cp->addr.bdaddr);
		if (conn) {
			/* Defer clearing up the connection parameters
			 * until closing to give a chance of keeping
			 * them if a repairing happens.
			 */
			set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

			/* If disconnection is not requested, then
			 * clear the connection variable so that the
			 * link is not terminated.
			 */
			if (!cp->disconnect)
				conn = NULL;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	}

	/* No keys were found for the address; nothing was paired */
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3026 
/* Handle the Disconnect mgmt command.
 *
 * Looks up the ACL or LE connection for the given address and
 * requests a remote-user-terminated disconnect; the mgmt response is
 * sent when the disconnect completes, via the pending command added
 * here.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The reply always echoes the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one Disconnect command may be pending at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3091 
3092 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3093 {
3094 	switch (link_type) {
3095 	case LE_LINK:
3096 		switch (addr_type) {
3097 		case ADDR_LE_DEV_PUBLIC:
3098 			return BDADDR_LE_PUBLIC;
3099 
3100 		default:
3101 			/* Fallback to LE Random address type */
3102 			return BDADDR_LE_RANDOM;
3103 		}
3104 
3105 	default:
3106 		/* Fallback to BR/EDR type */
3107 		return BDADDR_BREDR;
3108 	}
3109 }
3110 
3111 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3112 			   u16 data_len)
3113 {
3114 	struct mgmt_rp_get_connections *rp;
3115 	struct hci_conn *c;
3116 	size_t rp_len;
3117 	int err;
3118 	u16 i;
3119 
3120 	BT_DBG("");
3121 
3122 	hci_dev_lock(hdev);
3123 
3124 	if (!hdev_is_powered(hdev)) {
3125 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3126 				      MGMT_STATUS_NOT_POWERED);
3127 		goto unlock;
3128 	}
3129 
3130 	i = 0;
3131 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
3132 		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3133 			i++;
3134 	}
3135 
3136 	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
3137 	rp = kmalloc(rp_len, GFP_KERNEL);
3138 	if (!rp) {
3139 		err = -ENOMEM;
3140 		goto unlock;
3141 	}
3142 
3143 	i = 0;
3144 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
3145 		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3146 			continue;
3147 		bacpy(&rp->addr[i].bdaddr, &c->dst);
3148 		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3149 		if (c->type == SCO_LINK || c->type == ESCO_LINK)
3150 			continue;
3151 		i++;
3152 	}
3153 
3154 	rp->conn_count = cpu_to_le16(i);
3155 
3156 	/* Recalculate length in case of filtered SCO connections, etc */
3157 	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
3158 
3159 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3160 				rp_len);
3161 
3162 	kfree(rp);
3163 
3164 unlock:
3165 	hci_dev_unlock(hdev);
3166 	return err;
3167 }
3168 
3169 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3170 				   struct mgmt_cp_pin_code_neg_reply *cp)
3171 {
3172 	struct mgmt_pending_cmd *cmd;
3173 	int err;
3174 
3175 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3176 			       sizeof(*cp));
3177 	if (!cmd)
3178 		return -ENOMEM;
3179 
3180 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3181 			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3182 	if (err < 0)
3183 		mgmt_pending_remove(cmd);
3184 
3185 	return err;
3186 }
3187 
/* Handle the PIN Code Reply mgmt command.
 *
 * Forwards the PIN to the controller for the matching ACL connection.
 * A 16-byte PIN is required when high security was requested; shorter
 * PINs trigger a negative reply on the HCI level plus an Invalid
 * Params status to the caller.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3249 
3250 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3251 			     u16 len)
3252 {
3253 	struct mgmt_cp_set_io_capability *cp = data;
3254 
3255 	BT_DBG("");
3256 
3257 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3258 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3259 					 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3260 
3261 	hci_dev_lock(hdev);
3262 
3263 	hdev->io_capability = cp->io_capability;
3264 
3265 	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3266 	       hdev->io_capability);
3267 
3268 	hci_dev_unlock(hdev);
3269 
3270 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3271 				 NULL, 0);
3272 }
3273 
3274 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3275 {
3276 	struct hci_dev *hdev = conn->hdev;
3277 	struct mgmt_pending_cmd *cmd;
3278 
3279 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3280 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3281 			continue;
3282 
3283 		if (cmd->user_data != conn)
3284 			continue;
3285 
3286 		return cmd;
3287 	}
3288 
3289 	return NULL;
3290 }
3291 
/* Complete a pending Pair Device command with the given MGMT status,
 * detach the pairing callbacks from the connection and release the
 * references that were taken on it when the command was set up.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	/* Report back the remote address the pairing was for */
	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Drop the reference taken via hci_conn_get() in pair_device() */
	hci_conn_put(conn);

	return err;
}
3320 
3321 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3322 {
3323 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3324 	struct mgmt_pending_cmd *cmd;
3325 
3326 	cmd = find_pairing(conn);
3327 	if (cmd) {
3328 		cmd->cmd_complete(cmd, status);
3329 		mgmt_pending_remove(cmd);
3330 	}
3331 }
3332 
3333 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3334 {
3335 	struct mgmt_pending_cmd *cmd;
3336 
3337 	BT_DBG("status %u", status);
3338 
3339 	cmd = find_pairing(conn);
3340 	if (!cmd) {
3341 		BT_DBG("Unable to find a pending command");
3342 		return;
3343 	}
3344 
3345 	cmd->cmd_complete(cmd, mgmt_status(status));
3346 	mgmt_pending_remove(cmd);
3347 }
3348 
3349 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3350 {
3351 	struct mgmt_pending_cmd *cmd;
3352 
3353 	BT_DBG("status %u", status);
3354 
3355 	if (!status)
3356 		return;
3357 
3358 	cmd = find_pairing(conn);
3359 	if (!cmd) {
3360 		BT_DBG("Unable to find a pending command");
3361 		return;
3362 	}
3363 
3364 	cmd->cmd_complete(cmd, mgmt_status(status));
3365 	mgmt_pending_remove(cmd);
3366 }
3367 
/* MGMT_OP_PAIR_DEVICE handler: initiate dedicated bonding with a remote
 * device over BR/EDR or LE depending on the supplied address type. On
 * success a pending command is queued and resolved later through the
 * connection callbacks or mgmt_smp_complete().
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The response always carries the target address, even on error */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Refuse to re-pair with an already paired device */
	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, HCI_LE_CONN_TIMEOUT,
				      HCI_ROLE_MASTER);
	}

	if (IS_ERR(conn)) {
		int status;

		/* Map the connect errno onto an MGMT status code */
		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* A non-NULL callback means another pairing already owns this
	 * connection; drop the reference we just took and report busy.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* Extra reference released later in pairing_complete() */
	cmd->user_data = hci_conn_get(conn);

	/* If the link is already connected and secure, complete at once */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3501 
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort an in-progress Pair Device
 * command for the given remote address, completing it with a
 * "cancelled" status.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* There must be a pending Pair Device command to cancel */
	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The supplied address must match the pairing being cancelled */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3544 
/* Common helper for all user pairing responses (PIN code, user confirm
 * and passkey replies, both positive and negative). For LE links the
 * response goes through SMP; for BR/EDR a pending command is queued and
 * the reply is sent to the controller with @hci_op.
 *
 * @passkey is only meaningful when @hci_op is HCI_OP_USER_PASSKEY_REPLY;
 * callers pass 0 otherwise.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	/* The response only makes sense for an existing connection */
	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses are handled by SMP, not by HCI commands */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3614 
3615 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3616 			      void *data, u16 len)
3617 {
3618 	struct mgmt_cp_pin_code_neg_reply *cp = data;
3619 
3620 	BT_DBG("");
3621 
3622 	return user_pairing_resp(sk, hdev, &cp->addr,
3623 				MGMT_OP_PIN_CODE_NEG_REPLY,
3624 				HCI_OP_PIN_CODE_NEG_REPLY, 0);
3625 }
3626 
3627 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3628 			      u16 len)
3629 {
3630 	struct mgmt_cp_user_confirm_reply *cp = data;
3631 
3632 	BT_DBG("");
3633 
3634 	if (len != sizeof(*cp))
3635 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3636 				       MGMT_STATUS_INVALID_PARAMS);
3637 
3638 	return user_pairing_resp(sk, hdev, &cp->addr,
3639 				 MGMT_OP_USER_CONFIRM_REPLY,
3640 				 HCI_OP_USER_CONFIRM_REPLY, 0);
3641 }
3642 
3643 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3644 				  void *data, u16 len)
3645 {
3646 	struct mgmt_cp_user_confirm_neg_reply *cp = data;
3647 
3648 	BT_DBG("");
3649 
3650 	return user_pairing_resp(sk, hdev, &cp->addr,
3651 				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3652 				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3653 }
3654 
3655 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3656 			      u16 len)
3657 {
3658 	struct mgmt_cp_user_passkey_reply *cp = data;
3659 
3660 	BT_DBG("");
3661 
3662 	return user_pairing_resp(sk, hdev, &cp->addr,
3663 				 MGMT_OP_USER_PASSKEY_REPLY,
3664 				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3665 }
3666 
3667 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3668 				  void *data, u16 len)
3669 {
3670 	struct mgmt_cp_user_passkey_neg_reply *cp = data;
3671 
3672 	BT_DBG("");
3673 
3674 	return user_pairing_resp(sk, hdev, &cp->addr,
3675 				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3676 				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3677 }
3678 
3679 static void update_name(struct hci_request *req)
3680 {
3681 	struct hci_dev *hdev = req->hdev;
3682 	struct hci_cp_write_local_name cp;
3683 
3684 	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3685 
3686 	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
3687 }
3688 
/* HCI request completion callback for Set Local Name: finish the
 * pending MGMT command with either the status or the echoed name.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The command may already have been cleaned up elsewhere */
	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			        mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3716 
/* MGMT_OP_SET_LOCAL_NAME handler: update the device name and short
 * name. When powered, the new name is also written to the controller
 * and reflected in the EIR and scan response data.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* When powered off there is no controller to update; store the
	 * name and notify other management sockets right away.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev,
					 data, len, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3785 
/* Completion callback for Read Local OOB Data: translate the HCI
 * response (legacy or Secure Connections extended format, selected by
 * @opcode) into the MGMT reply and finish the pending command.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
				         u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		/* Guard against a truncated controller response */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Legacy format carries no P-256 values; shrink the reply */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		/* Guard against a truncated controller response */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}
3844 
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: ask the controller for its local
 * OOB pairing data, using the extended HCI command when BR/EDR Secure
 * Connections is enabled.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB data only exists on Secure Simple Pairing capable adapters */
	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one Read Local OOB Data command may run at a time */
	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
	else
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_data_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3895 
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store OOB pairing data received
 * out-of-band for a remote device. Two wire formats are accepted,
 * distinguished by @len: the legacy P-192-only form (BR/EDR only) and
 * the extended form carrying both P-192 and P-256 values.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	BT_DBG("%s ", hdev->name);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* The legacy format is defined for BR/EDR addresses only */
		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4002 
4003 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4004 				  void *data, u16 len)
4005 {
4006 	struct mgmt_cp_remove_remote_oob_data *cp = data;
4007 	u8 status;
4008 	int err;
4009 
4010 	BT_DBG("%s", hdev->name);
4011 
4012 	if (cp->addr.type != BDADDR_BREDR)
4013 		return mgmt_cmd_complete(sk, hdev->id,
4014 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4015 					 MGMT_STATUS_INVALID_PARAMS,
4016 					 &cp->addr, sizeof(cp->addr));
4017 
4018 	hci_dev_lock(hdev);
4019 
4020 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4021 		hci_remote_oob_data_clear(hdev);
4022 		status = MGMT_STATUS_SUCCESS;
4023 		goto done;
4024 	}
4025 
4026 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
4027 	if (err < 0)
4028 		status = MGMT_STATUS_INVALID_PARAMS;
4029 	else
4030 		status = MGMT_STATUS_SUCCESS;
4031 
4032 done:
4033 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4034 				status, &cp->addr, sizeof(cp->addr));
4035 
4036 	hci_dev_unlock(hdev);
4037 	return err;
4038 }
4039 
4040 static bool trigger_bredr_inquiry(struct hci_request *req, u8 *status)
4041 {
4042 	struct hci_dev *hdev = req->hdev;
4043 	struct hci_cp_inquiry cp;
4044 	/* General inquiry access code (GIAC) */
4045 	u8 lap[3] = { 0x33, 0x8b, 0x9e };
4046 
4047 	*status = mgmt_bredr_support(hdev);
4048 	if (*status)
4049 		return false;
4050 
4051 	if (hci_dev_test_flag(hdev, HCI_INQUIRY)) {
4052 		*status = MGMT_STATUS_BUSY;
4053 		return false;
4054 	}
4055 
4056 	hci_inquiry_cache_flush(hdev);
4057 
4058 	memset(&cp, 0, sizeof(cp));
4059 	memcpy(&cp.lap, lap, sizeof(cp.lap));
4060 	cp.length = DISCOV_BREDR_INQUIRY_LEN;
4061 
4062 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
4063 
4064 	return true;
4065 }
4066 
/* Queue the commands needed to start an active LE scan with the given
 * scan interval onto @req. Returns false and fills in *status with an
 * MGMT status code when scanning cannot be started.
 */
static bool trigger_le_scan(struct hci_request *req, u16 interval, u8 *status)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	u8 own_addr_type;
	int err;

	*status = mgmt_le_support(hdev);
	if (*status)
		return false;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
		/* Don't let discovery abort an outgoing connection attempt
		 * that's using directed advertising.
		 */
		if (hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
			*status = MGMT_STATUS_REJECTED;
			return false;
		}

		disable_advertising(req);
	}

	/* If controller is scanning, it means the background scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req);

	/* All active scans will be done with either a resolvable private
	 * address (when privacy feature has been enabled) or non-resolvable
	 * private address.
	 */
	err = hci_update_random_address(req, true, &own_addr_type);
	if (err < 0) {
		*status = MGMT_STATUS_FAILED;
		return false;
	}

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_ACTIVE;
	param_cp.interval = cpu_to_le16(interval);
	param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
	param_cp.own_address_type = own_addr_type;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);

	return true;
}
4126 
/* Queue the commands to start the discovery procedure currently
 * configured in hdev->discovery.type onto @req. Returns false and
 * fills in *status with an MGMT status code on failure.
 */
static bool trigger_discovery(struct hci_request *req, u8 *status)
{
	struct hci_dev *hdev = req->hdev;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!trigger_bredr_inquiry(req, status))
			return false;
		break;

	case DISCOV_TYPE_INTERLEAVED:
		/* Controllers with the simultaneous-discovery quirk can run
		 * inquiry and LE scan at the same time; start both here.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			/* During simultaneous discovery, we double LE scan
			 * interval. We must leave some time for the controller
			 * to do BR/EDR inquiry.
			 */
			if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT * 2,
					     status))
				return false;

			if (!trigger_bredr_inquiry(req, status))
				return false;

			return true;
		}

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
			*status = MGMT_STATUS_NOT_SUPPORTED;
			return false;
		}
		/* fall through */

	case DISCOV_TYPE_LE:
		if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT, status))
			return false;
		break;

	default:
		*status = MGMT_STATUS_INVALID_PARAMS;
		return false;
	}

	return true;
}
4172 
/* HCI request completion callback shared by Start Discovery and Start
 * Service Discovery: finish the pending command, update the discovery
 * state and, for LE scans, schedule the scan-disable work.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	unsigned long timeout;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	/* The same completion handler serves both discovery commands */
	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	if (status) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_FINDING);

	/* If the scan involves LE scan, pick proper timeout to schedule
	 * hdev->le_scan_disable that will stop it.
	 */
	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaving discovery in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		else
			timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;
	case DISCOV_TYPE_BREDR:
		timeout = 0;
		break;
	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
		timeout = 0;
		break;
	}

	if (timeout) {
		/* When service discovery is used and the controller has
		 * a strict duplicate filter, it is important to remember
		 * the start and duration of the scan. This is required
		 * for restarting scanning during the discovery phase.
		 */
		if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
			     &hdev->quirks) &&
		    hdev->discovery.result_filtering) {
			hdev->discovery.scan_start = jiffies;
			hdev->discovery.scan_duration = timeout;
		}

		queue_delayed_work(hdev->workqueue,
				   &hdev->le_scan_disable, timeout);
	}

unlock:
	hci_dev_unlock(hdev);
}
4249 
/* MGMT_OP_START_DISCOVERY handler: begin device discovery of the
 * requested type (BR/EDR, LE or interleaved).
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery procedure may be active at a time */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	/* The request is in flight; completion moves us to FINDING */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4315 
4316 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
4317 					  u8 status)
4318 {
4319 	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
4320 				 cmd->param, 1);
4321 }
4322 
/* MGMT_OP_START_SERVICE_DISCOVERY handler: begin discovery with result
 * filtering by RSSI threshold and/or a caller-supplied list of service
 * UUIDs appended to the fixed-size command structure.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	/* Upper bound keeps uuid_count * 16 from overflowing u16 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery procedure may be active at a time */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		BT_ERR("service_discovery: too big uuid_count value %u",
		       uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The variable-length tail must match the declared UUID count */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		BT_ERR("service_discovery: expected %u bytes, got %u bytes",
		       expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	/* The request is in flight; completion moves us to FINDING */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4431 
4432 static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4433 {
4434 	struct mgmt_pending_cmd *cmd;
4435 
4436 	BT_DBG("status %d", status);
4437 
4438 	hci_dev_lock(hdev);
4439 
4440 	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4441 	if (cmd) {
4442 		cmd->cmd_complete(cmd, mgmt_status(status));
4443 		mgmt_pending_remove(cmd);
4444 	}
4445 
4446 	if (!status)
4447 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4448 
4449 	hci_dev_unlock(hdev);
4450 }
4451 
/* Stop Discovery: cancel the currently active discovery procedure.
 * The given type must match the type of the running discovery. The
 * command reply is deferred to stop_discovery_complete unless stopping
 * required no HCI commands at all.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* There is nothing to stop when no discovery is in progress */
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the active discovery type */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = generic_cmd_complete;

	hci_req_init(&req, hdev);

	hci_stop_discovery(&req);

	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
		goto unlock;
	}

	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4509 
4510 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4511 			u16 len)
4512 {
4513 	struct mgmt_cp_confirm_name *cp = data;
4514 	struct inquiry_entry *e;
4515 	int err;
4516 
4517 	BT_DBG("%s", hdev->name);
4518 
4519 	hci_dev_lock(hdev);
4520 
4521 	if (!hci_discovery_active(hdev)) {
4522 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4523 					MGMT_STATUS_FAILED, &cp->addr,
4524 					sizeof(cp->addr));
4525 		goto failed;
4526 	}
4527 
4528 	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4529 	if (!e) {
4530 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4531 					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4532 					sizeof(cp->addr));
4533 		goto failed;
4534 	}
4535 
4536 	if (cp->name_known) {
4537 		e->name_state = NAME_KNOWN;
4538 		list_del(&e->list);
4539 	} else {
4540 		e->name_state = NAME_NEEDED;
4541 		hci_inquiry_cache_update_resolve(hdev, e);
4542 	}
4543 
4544 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
4545 				&cp->addr, sizeof(cp->addr));
4546 
4547 failed:
4548 	hci_dev_unlock(hdev);
4549 	return err;
4550 }
4551 
4552 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4553 			u16 len)
4554 {
4555 	struct mgmt_cp_block_device *cp = data;
4556 	u8 status;
4557 	int err;
4558 
4559 	BT_DBG("%s", hdev->name);
4560 
4561 	if (!bdaddr_type_is_valid(cp->addr.type))
4562 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4563 					 MGMT_STATUS_INVALID_PARAMS,
4564 					 &cp->addr, sizeof(cp->addr));
4565 
4566 	hci_dev_lock(hdev);
4567 
4568 	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4569 				  cp->addr.type);
4570 	if (err < 0) {
4571 		status = MGMT_STATUS_FAILED;
4572 		goto done;
4573 	}
4574 
4575 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4576 		   sk);
4577 	status = MGMT_STATUS_SUCCESS;
4578 
4579 done:
4580 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4581 				&cp->addr, sizeof(cp->addr));
4582 
4583 	hci_dev_unlock(hdev);
4584 
4585 	return err;
4586 }
4587 
4588 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4589 			  u16 len)
4590 {
4591 	struct mgmt_cp_unblock_device *cp = data;
4592 	u8 status;
4593 	int err;
4594 
4595 	BT_DBG("%s", hdev->name);
4596 
4597 	if (!bdaddr_type_is_valid(cp->addr.type))
4598 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4599 					 MGMT_STATUS_INVALID_PARAMS,
4600 					 &cp->addr, sizeof(cp->addr));
4601 
4602 	hci_dev_lock(hdev);
4603 
4604 	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4605 				  cp->addr.type);
4606 	if (err < 0) {
4607 		status = MGMT_STATUS_INVALID_PARAMS;
4608 		goto done;
4609 	}
4610 
4611 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4612 		   sk);
4613 	status = MGMT_STATUS_SUCCESS;
4614 
4615 done:
4616 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4617 				&cp->addr, sizeof(cp->addr));
4618 
4619 	hci_dev_unlock(hdev);
4620 
4621 	return err;
4622 }
4623 
4624 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4625 			 u16 len)
4626 {
4627 	struct mgmt_cp_set_device_id *cp = data;
4628 	struct hci_request req;
4629 	int err;
4630 	__u16 source;
4631 
4632 	BT_DBG("%s", hdev->name);
4633 
4634 	source = __le16_to_cpu(cp->source);
4635 
4636 	if (source > 0x0002)
4637 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4638 				       MGMT_STATUS_INVALID_PARAMS);
4639 
4640 	hci_dev_lock(hdev);
4641 
4642 	hdev->devid_source = source;
4643 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4644 	hdev->devid_product = __le16_to_cpu(cp->product);
4645 	hdev->devid_version = __le16_to_cpu(cp->version);
4646 
4647 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
4648 				NULL, 0);
4649 
4650 	hci_req_init(&req, hdev);
4651 	update_eir(&req);
4652 	hci_req_run(&req, NULL);
4653 
4654 	hci_dev_unlock(hdev);
4655 
4656 	return err;
4657 }
4658 
/* Completion callback used when re-enabling instance advertising from
 * set_advertising_complete. There is no pending mgmt command to answer,
 * so the status is only logged.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	BT_DBG("status %d", status);
}
4664 
/* Request callback for Set Advertising. Syncs the HCI_ADVERTISING
 * setting flag with the controller's HCI_LE_ADV state, answers all
 * pending Set Advertising commands, and re-enables instance advertising
 * if it was configured and just got overridden.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail all pending Set Advertising commands with the
		 * translated HCI error.
		 */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then enable the advertising instance.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		goto unlock;

	hci_req_init(&req, hdev);

	update_adv_data(&req);
	enable_advertising(&req);

	if (hci_req_run(&req, enable_advertising_instance) < 0)
		BT_ERR("Failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}
4712 
/* Set Advertising: 0x00 = off, 0x01 = advertising enabled,
 * 0x02 = advertising enabled in connectable mode. When HCI
 * communication is needed the reply is deferred to
 * set_advertising_complete.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one advertising or LE toggle operation at a time */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* Record the connectable sub-mode before building the request */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting. */
		update_adv_data_for_instance(&req, 0);
		update_scan_rsp_data_for_instance(&req, 0);
		enable_advertising(&req);
	} else {
		disable_advertising(&req);
	}

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4808 
4809 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4810 			      void *data, u16 len)
4811 {
4812 	struct mgmt_cp_set_static_address *cp = data;
4813 	int err;
4814 
4815 	BT_DBG("%s", hdev->name);
4816 
4817 	if (!lmp_le_capable(hdev))
4818 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4819 				       MGMT_STATUS_NOT_SUPPORTED);
4820 
4821 	if (hdev_is_powered(hdev))
4822 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4823 				       MGMT_STATUS_REJECTED);
4824 
4825 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4826 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4827 			return mgmt_cmd_status(sk, hdev->id,
4828 					       MGMT_OP_SET_STATIC_ADDRESS,
4829 					       MGMT_STATUS_INVALID_PARAMS);
4830 
4831 		/* Two most significant bits shall be set */
4832 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4833 			return mgmt_cmd_status(sk, hdev->id,
4834 					       MGMT_OP_SET_STATIC_ADDRESS,
4835 					       MGMT_STATUS_INVALID_PARAMS);
4836 	}
4837 
4838 	hci_dev_lock(hdev);
4839 
4840 	bacpy(&hdev->static_addr, &cp->bdaddr);
4841 
4842 	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
4843 	if (err < 0)
4844 		goto unlock;
4845 
4846 	err = new_settings(hdev, sk);
4847 
4848 unlock:
4849 	hci_dev_unlock(hdev);
4850 	return err;
4851 }
4852 
4853 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4854 			   void *data, u16 len)
4855 {
4856 	struct mgmt_cp_set_scan_params *cp = data;
4857 	__u16 interval, window;
4858 	int err;
4859 
4860 	BT_DBG("%s", hdev->name);
4861 
4862 	if (!lmp_le_capable(hdev))
4863 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4864 				       MGMT_STATUS_NOT_SUPPORTED);
4865 
4866 	interval = __le16_to_cpu(cp->interval);
4867 
4868 	if (interval < 0x0004 || interval > 0x4000)
4869 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4870 				       MGMT_STATUS_INVALID_PARAMS);
4871 
4872 	window = __le16_to_cpu(cp->window);
4873 
4874 	if (window < 0x0004 || window > 0x4000)
4875 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4876 				       MGMT_STATUS_INVALID_PARAMS);
4877 
4878 	if (window > interval)
4879 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4880 				       MGMT_STATUS_INVALID_PARAMS);
4881 
4882 	hci_dev_lock(hdev);
4883 
4884 	hdev->le_scan_interval = interval;
4885 	hdev->le_scan_window = window;
4886 
4887 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
4888 				NULL, 0);
4889 
4890 	/* If background scan is running, restart it so new parameters are
4891 	 * loaded.
4892 	 */
4893 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4894 	    hdev->discovery.state == DISCOVERY_STOPPED) {
4895 		struct hci_request req;
4896 
4897 		hci_req_init(&req, hdev);
4898 
4899 		hci_req_add_le_scan_disable(&req);
4900 		hci_req_add_le_passive_scan(&req);
4901 
4902 		hci_req_run(&req, NULL);
4903 	}
4904 
4905 	hci_dev_unlock(hdev);
4906 
4907 	return err;
4908 }
4909 
/* Request callback for Set Fast Connectable. On success the
 * HCI_FAST_CONNECTABLE flag is synced with the requested value and new
 * settings are sent; on failure a command status with the translated
 * HCI error is returned instead.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			        mgmt_status(status));
	} else {
		/* The requested mode was stashed in the pending command's
		 * parameter copy.
		 */
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4943 
/* Set Fast Connectable: toggle the fast-connectable setting, which is
 * written to the controller via write_fast_connectable(). BR/EDR only,
 * and requires at least a 1.2 controller. When powered, the reply is
 * deferred to fast_connectable_complete.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Requested mode already active: just repeat current settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* While powered off only the setting flag is toggled; the
	 * controller is updated on power-on.
	 */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
5008 
/* Request callback for Set BR/EDR. On failure the HCI_BREDR_ENABLED
 * flag (optimistically set by set_bredr) is rolled back; on success the
 * new settings are sent to the pending command's socket.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5040 
/* Set BR/EDR: enable or disable BR/EDR support on a dual-mode
 * controller. Disabling while powered is rejected, as is re-enabling
 * when a static address or secure connections is in use. When an HCI
 * request is needed the reply comes from set_bredr_complete.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Only meaningful on dual-mode (BR/EDR + LE) controllers */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Requested mode already active: just repeat current settings */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all BR/EDR-only settings */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, false);
	__hci_update_page_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5152 
/* Request callback for Set Secure Connections. On success the
 * HCI_SC_ENABLED/HCI_SC_ONLY flags are synced with the requested mode
 * (0x00 = off, 0x01 = enabled, 0x02 = SC-only) before the new settings
 * are sent to the pending command's socket.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
			        mgmt_status(status));
		goto remove;
	}

	/* The requested mode was stashed in the pending command's
	 * parameter copy.
	 */
	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}
5197 
/* Set Secure Connections: 0x00 = off, 0x01 = enabled, 0x02 = SC-only
 * mode. When the controller needs to be updated, the Write Secure
 * Connections Host Support command is issued and the reply is deferred
 * to sc_enable_complete.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On an SC-capable BR/EDR controller, SSP must be enabled first */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Without a powered, SC-capable, BR/EDR-enabled controller only
	 * the setting flags are toggled; no HCI command is needed.
	 */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Requested mode already active: just repeat current settings */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
5285 
5286 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
5287 			  void *data, u16 len)
5288 {
5289 	struct mgmt_mode *cp = data;
5290 	bool changed, use_changed;
5291 	int err;
5292 
5293 	BT_DBG("request for %s", hdev->name);
5294 
5295 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5296 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
5297 				       MGMT_STATUS_INVALID_PARAMS);
5298 
5299 	hci_dev_lock(hdev);
5300 
5301 	if (cp->val)
5302 		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
5303 	else
5304 		changed = hci_dev_test_and_clear_flag(hdev,
5305 						      HCI_KEEP_DEBUG_KEYS);
5306 
5307 	if (cp->val == 0x02)
5308 		use_changed = !hci_dev_test_and_set_flag(hdev,
5309 							 HCI_USE_DEBUG_KEYS);
5310 	else
5311 		use_changed = hci_dev_test_and_clear_flag(hdev,
5312 							  HCI_USE_DEBUG_KEYS);
5313 
5314 	if (hdev_is_powered(hdev) && use_changed &&
5315 	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
5316 		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
5317 		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
5318 			     sizeof(mode), &mode);
5319 	}
5320 
5321 	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
5322 	if (err < 0)
5323 		goto unlock;
5324 
5325 	if (changed)
5326 		err = new_settings(hdev, sk);
5327 
5328 unlock:
5329 	hci_dev_unlock(hdev);
5330 	return err;
5331 }
5332 
5333 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5334 		       u16 len)
5335 {
5336 	struct mgmt_cp_set_privacy *cp = cp_data;
5337 	bool changed;
5338 	int err;
5339 
5340 	BT_DBG("request for %s", hdev->name);
5341 
5342 	if (!lmp_le_capable(hdev))
5343 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5344 				       MGMT_STATUS_NOT_SUPPORTED);
5345 
5346 	if (cp->privacy != 0x00 && cp->privacy != 0x01)
5347 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5348 				       MGMT_STATUS_INVALID_PARAMS);
5349 
5350 	if (hdev_is_powered(hdev))
5351 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5352 				       MGMT_STATUS_REJECTED);
5353 
5354 	hci_dev_lock(hdev);
5355 
5356 	/* If user space supports this command it is also expected to
5357 	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
5358 	 */
5359 	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5360 
5361 	if (cp->privacy) {
5362 		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
5363 		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
5364 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
5365 	} else {
5366 		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
5367 		memset(hdev->irk, 0, sizeof(hdev->irk));
5368 		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
5369 	}
5370 
5371 	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
5372 	if (err < 0)
5373 		goto unlock;
5374 
5375 	if (changed)
5376 		err = new_settings(hdev, sk);
5377 
5378 unlock:
5379 	hci_dev_unlock(hdev);
5380 	return err;
5381 }
5382 
5383 static bool irk_is_valid(struct mgmt_irk_info *irk)
5384 {
5385 	switch (irk->addr.type) {
5386 	case BDADDR_LE_PUBLIC:
5387 		return true;
5388 
5389 	case BDADDR_LE_RANDOM:
5390 		/* Two most significant bits shall be set */
5391 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5392 			return false;
5393 		return true;
5394 	}
5395 
5396 	return false;
5397 }
5398 
/* Load IRKs: replace the kernel's list of Identity Resolving Keys with
 * the list supplied by user space. The whole list is validated before
 * any existing keys are dropped.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Cap so that expected_len below can never exceed U16_MAX */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		BT_ERR("load_irks: too big irk_count value %u", irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must exactly match the declared IRK count */
	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	/* Validate all entries before touching the existing key list */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type;

		if (irk->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
			    BDADDR_ANY);
	}

	/* User space clearly handles IRKs, so enable RPA resolving */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5465 
5466 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5467 {
5468 	if (key->master != 0x00 && key->master != 0x01)
5469 		return false;
5470 
5471 	switch (key->addr.type) {
5472 	case BDADDR_LE_PUBLIC:
5473 		return true;
5474 
5475 	case BDADDR_LE_RANDOM:
5476 		/* Two most significant bits shall be set */
5477 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5478 			return false;
5479 		return true;
5480 	}
5481 
5482 	return false;
5483 }
5484 
5485 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5486 			       void *cp_data, u16 len)
5487 {
5488 	struct mgmt_cp_load_long_term_keys *cp = cp_data;
5489 	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
5490 				   sizeof(struct mgmt_ltk_info));
5491 	u16 key_count, expected_len;
5492 	int i, err;
5493 
5494 	BT_DBG("request for %s", hdev->name);
5495 
5496 	if (!lmp_le_capable(hdev))
5497 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5498 				       MGMT_STATUS_NOT_SUPPORTED);
5499 
5500 	key_count = __le16_to_cpu(cp->key_count);
5501 	if (key_count > max_key_count) {
5502 		BT_ERR("load_ltks: too big key_count value %u", key_count);
5503 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5504 				       MGMT_STATUS_INVALID_PARAMS);
5505 	}
5506 
5507 	expected_len = sizeof(*cp) + key_count *
5508 					sizeof(struct mgmt_ltk_info);
5509 	if (expected_len != len) {
5510 		BT_ERR("load_keys: expected %u bytes, got %u bytes",
5511 		       expected_len, len);
5512 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5513 				       MGMT_STATUS_INVALID_PARAMS);
5514 	}
5515 
5516 	BT_DBG("%s key_count %u", hdev->name, key_count);
5517 
5518 	for (i = 0; i < key_count; i++) {
5519 		struct mgmt_ltk_info *key = &cp->keys[i];
5520 
5521 		if (!ltk_is_valid(key))
5522 			return mgmt_cmd_status(sk, hdev->id,
5523 					       MGMT_OP_LOAD_LONG_TERM_KEYS,
5524 					       MGMT_STATUS_INVALID_PARAMS);
5525 	}
5526 
5527 	hci_dev_lock(hdev);
5528 
5529 	hci_smp_ltks_clear(hdev);
5530 
5531 	for (i = 0; i < key_count; i++) {
5532 		struct mgmt_ltk_info *key = &cp->keys[i];
5533 		u8 type, addr_type, authenticated;
5534 
5535 		if (key->addr.type == BDADDR_LE_PUBLIC)
5536 			addr_type = ADDR_LE_DEV_PUBLIC;
5537 		else
5538 			addr_type = ADDR_LE_DEV_RANDOM;
5539 
5540 		switch (key->type) {
5541 		case MGMT_LTK_UNAUTHENTICATED:
5542 			authenticated = 0x00;
5543 			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5544 			break;
5545 		case MGMT_LTK_AUTHENTICATED:
5546 			authenticated = 0x01;
5547 			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5548 			break;
5549 		case MGMT_LTK_P256_UNAUTH:
5550 			authenticated = 0x00;
5551 			type = SMP_LTK_P256;
5552 			break;
5553 		case MGMT_LTK_P256_AUTH:
5554 			authenticated = 0x01;
5555 			type = SMP_LTK_P256;
5556 			break;
5557 		case MGMT_LTK_P256_DEBUG:
5558 			authenticated = 0x00;
5559 			type = SMP_LTK_P256_DEBUG;
5560 		default:
5561 			continue;
5562 		}
5563 
5564 		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
5565 			    authenticated, key->val, key->enc_size, key->ediv,
5566 			    key->rand);
5567 	}
5568 
5569 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
5570 			   NULL, 0);
5571 
5572 	hci_dev_unlock(hdev);
5573 
5574 	return err;
5575 }
5576 
5577 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5578 {
5579 	struct hci_conn *conn = cmd->user_data;
5580 	struct mgmt_rp_get_conn_info rp;
5581 	int err;
5582 
5583 	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5584 
5585 	if (status == MGMT_STATUS_SUCCESS) {
5586 		rp.rssi = conn->rssi;
5587 		rp.tx_power = conn->tx_power;
5588 		rp.max_tx_power = conn->max_tx_power;
5589 	} else {
5590 		rp.rssi = HCI_RSSI_INVALID;
5591 		rp.tx_power = HCI_TX_POWER_INVALID;
5592 		rp.max_tx_power = HCI_TX_POWER_INVALID;
5593 	}
5594 
5595 	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
5596 				status, &rp, sizeof(rp));
5597 
5598 	hci_conn_drop(conn);
5599 	hci_conn_put(conn);
5600 
5601 	return err;
5602 }
5603 
/* Request callback for the Read RSSI / Read TX Power request built by
 * get_conn_info(). Recovers the affected connection from the last sent
 * HCI command and completes the matching pending mgmt command.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	BT_DBG("status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle.  Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	if (!cp) {
		BT_ERR("invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in conn_info response", handle);
		goto unlock;
	}

	/* Match the pending command by the connection it was queued for */
	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5656 
/* Handle MGMT_OP_GET_CONN_INFO: report RSSI and TX power for an
 * existing connection, refreshing the cached values from the
 * controller when they are older than a randomized maximum age.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one Get Conn Info request per connection at a time */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Keep the connection alive until conn_info_cmd_complete()
		 * drops these references.
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5777 
5778 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5779 {
5780 	struct hci_conn *conn = cmd->user_data;
5781 	struct mgmt_rp_get_clock_info rp;
5782 	struct hci_dev *hdev;
5783 	int err;
5784 
5785 	memset(&rp, 0, sizeof(rp));
5786 	memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
5787 
5788 	if (status)
5789 		goto complete;
5790 
5791 	hdev = hci_dev_get(cmd->index);
5792 	if (hdev) {
5793 		rp.local_clock = cpu_to_le32(hdev->clock);
5794 		hci_dev_put(hdev);
5795 	}
5796 
5797 	if (conn) {
5798 		rp.piconet_clock = cpu_to_le32(conn->clock);
5799 		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5800 	}
5801 
5802 complete:
5803 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
5804 				sizeof(rp));
5805 
5806 	if (conn) {
5807 		hci_conn_drop(conn);
5808 		hci_conn_put(conn);
5809 	}
5810 
5811 	return err;
5812 }
5813 
5814 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5815 {
5816 	struct hci_cp_read_clock *hci_cp;
5817 	struct mgmt_pending_cmd *cmd;
5818 	struct hci_conn *conn;
5819 
5820 	BT_DBG("%s status %u", hdev->name, status);
5821 
5822 	hci_dev_lock(hdev);
5823 
5824 	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
5825 	if (!hci_cp)
5826 		goto unlock;
5827 
5828 	if (hci_cp->which) {
5829 		u16 handle = __le16_to_cpu(hci_cp->handle);
5830 		conn = hci_conn_hash_lookup_handle(hdev, handle);
5831 	} else {
5832 		conn = NULL;
5833 	}
5834 
5835 	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5836 	if (!cmd)
5837 		goto unlock;
5838 
5839 	cmd->cmd_complete(cmd, mgmt_status(status));
5840 	mgmt_pending_remove(cmd);
5841 
5842 unlock:
5843 	hci_dev_unlock(hdev);
5844 }
5845 
/* Handle MGMT_OP_GET_CLOCK_INFO: read the local clock and, for a
 * non-ANY BR/EDR address, the piconet clock of that connection.
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is only available for BR/EDR */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* BDADDR_ANY means only the local clock is requested */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First Read Clock with which=0x00 reads the local clock */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* Keep the connection alive until clock_info_cmd_complete()
		 * drops these references.
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5921 
5922 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5923 {
5924 	struct hci_conn *conn;
5925 
5926 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5927 	if (!conn)
5928 		return false;
5929 
5930 	if (conn->dst_type != type)
5931 		return false;
5932 
5933 	if (conn->state != BT_CONNECTED)
5934 		return false;
5935 
5936 	return true;
5937 }
5938 
/* Set the auto-connect policy for an LE device and move its connection
 * parameters onto the matching pending action list.
 *
 * This function requires the caller holds hdev->lock.
 */
static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;

	/* Creates the params entry if it does not exist yet */
	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever action list the entry is currently on */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		__hci_update_background_scan(req);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		__hci_update_background_scan(req);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a pending connection if none exists yet */
		if (!is_connected(hdev, addr, addr_type)) {
			list_add(&params->action, &hdev->pend_le_conns);
			__hci_update_background_scan(req);
		}
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}
5980 
5981 static void device_added(struct sock *sk, struct hci_dev *hdev,
5982 			 bdaddr_t *bdaddr, u8 type, u8 action)
5983 {
5984 	struct mgmt_ev_device_added ev;
5985 
5986 	bacpy(&ev.addr.bdaddr, bdaddr);
5987 	ev.addr.type = type;
5988 	ev.action = action;
5989 
5990 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5991 }
5992 
5993 static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5994 {
5995 	struct mgmt_pending_cmd *cmd;
5996 
5997 	BT_DBG("status 0x%02x", status);
5998 
5999 	hci_dev_lock(hdev);
6000 
6001 	cmd = pending_find(MGMT_OP_ADD_DEVICE, hdev);
6002 	if (!cmd)
6003 		goto unlock;
6004 
6005 	cmd->cmd_complete(cmd, mgmt_status(status));
6006 	mgmt_pending_remove(cmd);
6007 
6008 unlock:
6009 	hci_dev_unlock(hdev);
6010 }
6011 
/* Handle MGMT_OP_ADD_DEVICE: whitelist a BR/EDR device for incoming
 * connections or register an LE device for background connection
 * establishment (action 0x00 = report, 0x01 = direct, 0x02 = always).
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		__hci_update_page_scan(&req);

		goto added;
	}

	if (cp->addr.type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
		goto unlock;
	}

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = hci_req_run(&req, add_device_complete);
	if (err < 0) {
		/* ENODATA means no HCI commands were needed (e.g. if
		 * the adapter is powered off).
		 */
		if (err == -ENODATA)
			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6104 
6105 static void device_removed(struct sock *sk, struct hci_dev *hdev,
6106 			   bdaddr_t *bdaddr, u8 type)
6107 {
6108 	struct mgmt_ev_device_removed ev;
6109 
6110 	bacpy(&ev.addr.bdaddr, bdaddr);
6111 	ev.addr.type = type;
6112 
6113 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
6114 }
6115 
6116 static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6117 {
6118 	struct mgmt_pending_cmd *cmd;
6119 
6120 	BT_DBG("status 0x%02x", status);
6121 
6122 	hci_dev_lock(hdev);
6123 
6124 	cmd = pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
6125 	if (!cmd)
6126 		goto unlock;
6127 
6128 	cmd->cmd_complete(cmd, mgmt_status(status));
6129 	mgmt_pending_remove(cmd);
6130 
6131 unlock:
6132 	hci_dev_unlock(hdev);
6133 }
6134 
/* Handle MGMT_OP_REMOVE_DEVICE: remove a single device from the
 * whitelist / connection parameter store, or - when the address is
 * BDADDR_ANY with type 0 - remove all whitelist entries and all
 * non-disabled LE connection parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Remove a single device */
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = cmd->cmd_complete(cmd,
							MGMT_STATUS_INVALID_PARAMS);
				mgmt_pending_remove(cmd);
				goto unlock;
			}

			__hci_update_page_scan(&req);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		/* Disabled entries were not added via Add Device and are
		 * therefore not removable through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		__hci_update_background_scan(&req);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* BDADDR_ANY: remove all devices */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* Only type 0 is valid together with BDADDR_ANY */
		if (cp->addr.type) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		__hci_update_page_scan(&req);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		__hci_update_background_scan(&req);
	}

complete:
	err = hci_req_run(&req, remove_device_complete);
	if (err < 0) {
		/* ENODATA means no HCI commands were needed (e.g. if
		 * the adapter is powered off).
		 */
		if (err == -ENODATA)
			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6261 
/* Handle MGMT_OP_LOAD_CONN_PARAM: replace the stored LE connection
 * parameters with the list supplied by userspace. Invalid entries are
 * skipped with an error log rather than failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Largest param count that can fit in a 64KiB mgmt message */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
6347 
/* Handle MGMT_OP_SET_EXTERNAL_CONFIG: toggle whether the controller is
 * treated as externally configured, re-registering the index on the
 * configured/unconfigured interface when that state transitions.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* The flag and is_configured() disagree exactly when the
	 * configured state just transitioned, so move the index between
	 * the configured and unconfigured interfaces.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6403 
/* Handle MGMT_OP_SET_PUBLIC_ADDRESS: configure the public address for
 * a controller that comes up without one, and if the device thereby
 * becomes fully configured, move it to the configured interface and
 * power it on.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* A driver callback for programming the address is required */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6455 
6456 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6457 				  u8 data_len)
6458 {
6459 	eir[eir_len++] = sizeof(type) + data_len;
6460 	eir[eir_len++] = type;
6461 	memcpy(&eir[eir_len], data, data_len);
6462 	eir_len += data_len;
6463 
6464 	return eir_len;
6465 }
6466 
/* Handle MGMT_OP_READ_LOCAL_OOB_EXT_DATA: build EIR-formatted local
 * out-of-band data for BR/EDR or LE pairing, reply to the caller and
 * broadcast the data to sockets that opted into OOB data events.
 *
 * Note eir_len is used twice: first to size the reply allocation, then
 * reset to 0 and re-accumulated while the EIR data is actually written.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	BT_DBG("%s", hdev->name);

	/* Determine the status and the worst-case EIR size up front */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		eir_len = eir_append_data(rp->eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* addr[6] marks the address type: 0x01 = static/random,
		 * 0x00 = public.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* 0x02 = peripheral preferred, 0x01 = central preferred */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* Sender implicitly subscribes to future OOB data updates */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
6611 
6612 static u32 get_supported_adv_flags(struct hci_dev *hdev)
6613 {
6614 	u32 flags = 0;
6615 
6616 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
6617 	flags |= MGMT_ADV_FLAG_DISCOV;
6618 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
6619 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
6620 
6621 	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID)
6622 		flags |= MGMT_ADV_FLAG_TX_POWER;
6623 
6624 	return flags;
6625 }
6626 
6627 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
6628 			     void *data, u16 data_len)
6629 {
6630 	struct mgmt_rp_read_adv_features *rp;
6631 	size_t rp_len;
6632 	int err;
6633 	bool instance;
6634 	u32 supported_flags;
6635 
6636 	BT_DBG("%s", hdev->name);
6637 
6638 	if (!lmp_le_capable(hdev))
6639 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
6640 				       MGMT_STATUS_REJECTED);
6641 
6642 	hci_dev_lock(hdev);
6643 
6644 	rp_len = sizeof(*rp);
6645 
6646 	/* Currently only one instance is supported, so just add 1 to the
6647 	 * response length.
6648 	 */
6649 	instance = hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE);
6650 	if (instance)
6651 		rp_len++;
6652 
6653 	rp = kmalloc(rp_len, GFP_ATOMIC);
6654 	if (!rp) {
6655 		hci_dev_unlock(hdev);
6656 		return -ENOMEM;
6657 	}
6658 
6659 	supported_flags = get_supported_adv_flags(hdev);
6660 
6661 	rp->supported_flags = cpu_to_le32(supported_flags);
6662 	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
6663 	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
6664 	rp->max_instances = 1;
6665 
6666 	/* Currently only one instance is supported, so simply return the
6667 	 * current instance number.
6668 	 */
6669 	if (instance) {
6670 		rp->num_instances = 1;
6671 		rp->instance[0] = 1;
6672 	} else {
6673 		rp->num_instances = 0;
6674 	}
6675 
6676 	hci_dev_unlock(hdev);
6677 
6678 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
6679 				MGMT_STATUS_SUCCESS, rp, rp_len);
6680 
6681 	kfree(rp);
6682 
6683 	return err;
6684 }
6685 
6686 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
6687 			      u8 len, bool is_adv_data)
6688 {
6689 	u8 max_len = HCI_MAX_AD_LENGTH;
6690 	int i, cur_len;
6691 	bool flags_managed = false;
6692 	bool tx_power_managed = false;
6693 	u32 flags_params = MGMT_ADV_FLAG_DISCOV | MGMT_ADV_FLAG_LIMITED_DISCOV |
6694 			   MGMT_ADV_FLAG_MANAGED_FLAGS;
6695 
6696 	if (is_adv_data && (adv_flags & flags_params)) {
6697 		flags_managed = true;
6698 		max_len -= 3;
6699 	}
6700 
6701 	if (is_adv_data && (adv_flags & MGMT_ADV_FLAG_TX_POWER)) {
6702 		tx_power_managed = true;
6703 		max_len -= 3;
6704 	}
6705 
6706 	if (len > max_len)
6707 		return false;
6708 
6709 	/* Make sure that the data is correctly formatted. */
6710 	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
6711 		cur_len = data[i];
6712 
6713 		if (flags_managed && data[i + 1] == EIR_FLAGS)
6714 			return false;
6715 
6716 		if (tx_power_managed && data[i + 1] == EIR_TX_POWER)
6717 			return false;
6718 
6719 		/* If the current field length would exceed the total data
6720 		 * length, then it's invalid.
6721 		 */
6722 		if (i + cur_len >= len)
6723 			return false;
6724 	}
6725 
6726 	return true;
6727 }
6728 
/* Request completion callback for add_advertising(): on failure wipe
 * the just-added instance again, then answer the pending command.
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_rp_add_advertising rp;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);

	/* If enabling advertising failed, roll back the instance state
	 * and tell listeners the instance is gone again.
	 */
	if (status) {
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
		memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
		advertising_removed(cmd ? cmd->sk : NULL, hdev, 1);
	}

	/* The rollback above runs even without a pending command. */
	if (!cmd)
		goto unlock;

	rp.instance = 0x01;

	if (status)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6764 
/* Delayed-work callback: the advertising instance timeout expired, so
 * remove the instance. Runs in workqueue context.
 */
static void adv_timeout_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance.timeout_exp.work);

	/* Clear the timeout before taking the lock — presumably so that
	 * clear_adv_instance() does not try to cancel this very work
	 * item (TODO confirm against clear_adv_instance()).
	 */
	hdev->adv_instance.timeout = 0;

	hci_dev_lock(hdev);
	clear_adv_instance(hdev);
	hci_dev_unlock(hdev);
}
6776 
/* Add Advertising command handler. Validates and stores the (single
 * supported) advertising instance and, when possible, programs the
 * controller with the new advertising data.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u32 supported_flags;
	u8 status;
	u16 timeout;
	int err;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	/* Advertising requires LE support on the controller. */
	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);

	/* The current implementation only supports adding one instance and only
	 * a subset of the specified flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	if (cp->instance != 0x01 || (flags & ~supported_flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout can only be enforced while the device is powered. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* Refuse while another operation that touches the advertising
	 * state is still in flight.
	 */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Validate both the advertising data and the scan response data,
	 * which follows it directly in cp->data.
	 */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	INIT_DELAYED_WORK(&hdev->adv_instance.timeout_exp, adv_timeout_expired);

	hdev->adv_instance.flags = flags;
	hdev->adv_instance.adv_data_len = cp->adv_data_len;
	hdev->adv_instance.scan_rsp_len = cp->scan_rsp_len;

	if (cp->adv_data_len)
		memcpy(hdev->adv_instance.adv_data, cp->data, cp->adv_data_len);

	if (cp->scan_rsp_len)
		memcpy(hdev->adv_instance.scan_rsp_data,
		       cp->data + cp->adv_data_len, cp->scan_rsp_len);

	/* Re-adding the instance resets any previously running timeout. */
	if (hdev->adv_instance.timeout)
		cancel_delayed_work(&hdev->adv_instance.timeout_exp);

	hdev->adv_instance.timeout = timeout;

	if (timeout)
		queue_delayed_work(hdev->workqueue,
				   &hdev->adv_instance.timeout_exp,
				   msecs_to_jiffies(timeout * 1000));

	/* Only emit Advertising Added when the instance flag was not
	 * already set.
	 */
	if (!hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING_INSTANCE))
		advertising_added(sk, hdev, 1);

	/* If the HCI_ADVERTISING flag is set or the device isn't powered then
	 * we have no HCI communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		rp.instance = 0x01;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	update_adv_data(&req);
	update_scan_rsp_data(&req);
	enable_advertising(&req);

	err = hci_req_run(&req, add_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6894 
/* Request completion callback for remove_advertising(): answer the
 * pending command.
 */
static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_rp_remove_advertising rp;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	/* A failure status here only means that we failed to disable
	 * advertising. Otherwise, the advertising instance has been removed,
	 * so report success.
	 */
	cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
	if (!cmd)
		goto unlock;

	rp.instance = 1;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
			  &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6922 
/* Remove Advertising command handler. Clears the advertising instance
 * and disables advertising on the controller when necessary.
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_rp_remove_advertising rp;
	int err;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	/* The current implementation only allows modifying instance no 1. A
	 * value of 0 indicates that all instances should be cleared.
	 */
	if (cp->instance > 1)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Refuse while another operation that touches the advertising
	 * state is still in flight.
	 */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* There is nothing to remove when no instance is configured. */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Stop a pending instance timeout before wiping the instance. */
	if (hdev->adv_instance.timeout)
		cancel_delayed_work(&hdev->adv_instance.timeout_exp);

	memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));

	advertising_removed(sk, hdev, 1);

	hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);

	/* If the HCI_ADVERTISING flag is set or the device isn't powered then
	 * we have no HCI communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		rp.instance = 1;
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_REMOVE_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);
	disable_advertising(&req);

	err = hci_req_run(&req, remove_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6997 
/* Dispatch table for mgmt commands. The position of each entry must
 * correspond to its MGMT_OP_* opcode value (see the explicit 0x0000
 * placeholder), so new entries may only be appended, never reordered.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
};
7086 
7087 void mgmt_index_added(struct hci_dev *hdev)
7088 {
7089 	struct mgmt_ev_ext_index ev;
7090 
7091 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7092 		return;
7093 
7094 	switch (hdev->dev_type) {
7095 	case HCI_BREDR:
7096 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7097 			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
7098 					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7099 			ev.type = 0x01;
7100 		} else {
7101 			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
7102 					 HCI_MGMT_INDEX_EVENTS);
7103 			ev.type = 0x00;
7104 		}
7105 		break;
7106 	case HCI_AMP:
7107 		ev.type = 0x02;
7108 		break;
7109 	default:
7110 		return;
7111 	}
7112 
7113 	ev.bus = hdev->bus;
7114 
7115 	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
7116 			 HCI_MGMT_EXT_INDEX_EVENTS);
7117 }
7118 
7119 void mgmt_index_removed(struct hci_dev *hdev)
7120 {
7121 	struct mgmt_ev_ext_index ev;
7122 	u8 status = MGMT_STATUS_INVALID_INDEX;
7123 
7124 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7125 		return;
7126 
7127 	switch (hdev->dev_type) {
7128 	case HCI_BREDR:
7129 		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
7130 
7131 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7132 			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
7133 					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7134 			ev.type = 0x01;
7135 		} else {
7136 			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
7137 					 HCI_MGMT_INDEX_EVENTS);
7138 			ev.type = 0x00;
7139 		}
7140 		break;
7141 	case HCI_AMP:
7142 		ev.type = 0x02;
7143 		break;
7144 	default:
7145 		return;
7146 	}
7147 
7148 	ev.bus = hdev->bus;
7149 
7150 	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
7151 			 HCI_MGMT_EXT_INDEX_EVENTS);
7152 }
7153 
7154 /* This function requires the caller holds hdev->lock */
7155 static void restart_le_actions(struct hci_request *req)
7156 {
7157 	struct hci_dev *hdev = req->hdev;
7158 	struct hci_conn_params *p;
7159 
7160 	list_for_each_entry(p, &hdev->le_conn_params, list) {
7161 		/* Needed for AUTO_OFF case where might not "really"
7162 		 * have been powered off.
7163 		 */
7164 		list_del_init(&p->action);
7165 
7166 		switch (p->auto_connect) {
7167 		case HCI_AUTO_CONN_DIRECT:
7168 		case HCI_AUTO_CONN_ALWAYS:
7169 			list_add(&p->action, &hdev->pend_le_conns);
7170 			break;
7171 		case HCI_AUTO_CONN_REPORT:
7172 			list_add(&p->action, &hdev->pend_le_reports);
7173 			break;
7174 		default:
7175 			break;
7176 		}
7177 	}
7178 
7179 	__hci_update_background_scan(req);
7180 }
7181 
/* Request completion callback for powered_update_hci(): answer the
 * pending Set Powered commands and emit New Settings.
 */
static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	if (!status) {
		/* Register the available SMP channels (BR/EDR and LE) only
		 * when successfully powering on the controller. This late
		 * registration is required so that LE SMP can clearly
		 * decide if the public address or static address is used.
		 */
		smp_register(hdev);
	}

	hci_dev_lock(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	/* NOTE(review): presumably settings_rsp stored a held socket
	 * reference in match.sk — verify before changing this drop.
	 */
	if (match.sk)
		sock_put(match.sk);
}
7208 
/* Queue the HCI commands needed to bring the controller in line with
 * the host configured settings after powering on. Returns the result
 * of hci_req_run(), so 0 means commands were queued and
 * powered_complete() will run later.
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP (and Secure Connections when supported) if the host
	 * setting is on but the controller state does not yet reflect it.
	 */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
			enable_advertising(&req);

		restart_le_actions(&req);
	}

	/* Sync the link security (authentication) setting with the
	 * controller's HCI_AUTH state.
	 */
	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			write_fast_connectable(&req, true);
		else
			write_fast_connectable(&req, false);
		__hci_update_page_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
7281 
/* Handle a controller power state change on behalf of mgmt: answer
 * pending Set Powered commands and emit the resulting New Settings
 * event. Returns the result of new_settings().
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };
	int err;

	/* Nothing to do when the mgmt interface is not in use. */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return 0;

	if (powered) {
		/* When powered_update_hci() queued commands (returned 0),
		 * the pending Set Powered commands are answered from its
		 * completion callback (powered_complete) instead.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Announce a zeroed class of device if one was previously set. */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
7328 
7329 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
7330 {
7331 	struct mgmt_pending_cmd *cmd;
7332 	u8 status;
7333 
7334 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7335 	if (!cmd)
7336 		return;
7337 
7338 	if (err == -ERFKILL)
7339 		status = MGMT_STATUS_RFKILLED;
7340 	else
7341 		status = MGMT_STATUS_FAILED;
7342 
7343 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
7344 
7345 	mgmt_pending_remove(cmd);
7346 }
7347 
/* Timer callback for the discoverable timeout: clear the discoverable
 * flags and sync the new state to the controller.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	hci_req_init(&req, hdev);
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		update_adv_data(&req);

	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
7384 
7385 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
7386 		       bool persistent)
7387 {
7388 	struct mgmt_ev_new_link_key ev;
7389 
7390 	memset(&ev, 0, sizeof(ev));
7391 
7392 	ev.store_hint = persistent;
7393 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
7394 	ev.key.addr.type = BDADDR_BREDR;
7395 	ev.key.type = key->type;
7396 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
7397 	ev.key.pin_len = key->pin_len;
7398 
7399 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
7400 }
7401 
7402 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
7403 {
7404 	switch (ltk->type) {
7405 	case SMP_LTK:
7406 	case SMP_LTK_SLAVE:
7407 		if (ltk->authenticated)
7408 			return MGMT_LTK_AUTHENTICATED;
7409 		return MGMT_LTK_UNAUTHENTICATED;
7410 	case SMP_LTK_P256:
7411 		if (ltk->authenticated)
7412 			return MGMT_LTK_P256_AUTH;
7413 		return MGMT_LTK_P256_UNAUTH;
7414 	case SMP_LTK_P256_DEBUG:
7415 		return MGMT_LTK_P256_DEBUG;
7416 	}
7417 
7418 	return MGMT_LTK_UNAUTHENTICATED;
7419 }
7420 
7421 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
7422 {
7423 	struct mgmt_ev_new_long_term_key ev;
7424 
7425 	memset(&ev, 0, sizeof(ev));
7426 
7427 	/* Devices using resolvable or non-resolvable random addresses
7428 	 * without providing an indentity resolving key don't require
7429 	 * to store long term keys. Their addresses will change the
7430 	 * next time around.
7431 	 *
7432 	 * Only when a remote device provides an identity address
7433 	 * make sure the long term key is stored. If the remote
7434 	 * identity is known, the long term keys are internally
7435 	 * mapped to the identity address. So allow static random
7436 	 * and public addresses here.
7437 	 */
7438 	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
7439 	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
7440 		ev.store_hint = 0x00;
7441 	else
7442 		ev.store_hint = persistent;
7443 
7444 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
7445 	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
7446 	ev.key.type = mgmt_ltk_type(key);
7447 	ev.key.enc_size = key->enc_size;
7448 	ev.key.ediv = key->ediv;
7449 	ev.key.rand = key->rand;
7450 
7451 	if (key->type == SMP_LTK)
7452 		ev.key.master = 1;
7453 
7454 	memcpy(ev.key.val, key->val, sizeof(key->val));
7455 
7456 	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
7457 }
7458 
7459 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
7460 {
7461 	struct mgmt_ev_new_irk ev;
7462 
7463 	memset(&ev, 0, sizeof(ev));
7464 
7465 	/* For identity resolving keys from devices that are already
7466 	 * using a public address or static random address, do not
7467 	 * ask for storing this key. The identity resolving key really
7468 	 * is only mandatory for devices using resovlable random
7469 	 * addresses.
7470 	 *
7471 	 * Storing all identity resolving keys has the downside that
7472 	 * they will be also loaded on next boot of they system. More
7473 	 * identity resolving keys, means more time during scanning is
7474 	 * needed to actually resolve these addresses.
7475 	 */
7476 	if (bacmp(&irk->rpa, BDADDR_ANY))
7477 		ev.store_hint = 0x01;
7478 	else
7479 		ev.store_hint = 0x00;
7480 
7481 	bacpy(&ev.rpa, &irk->rpa);
7482 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
7483 	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
7484 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
7485 
7486 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
7487 }
7488 
7489 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
7490 		   bool persistent)
7491 {
7492 	struct mgmt_ev_new_csrk ev;
7493 
7494 	memset(&ev, 0, sizeof(ev));
7495 
7496 	/* Devices using resolvable or non-resolvable random addresses
7497 	 * without providing an indentity resolving key don't require
7498 	 * to store signature resolving keys. Their addresses will change
7499 	 * the next time around.
7500 	 *
7501 	 * Only when a remote device provides an identity address
7502 	 * make sure the signature resolving key is stored. So allow
7503 	 * static random and public addresses here.
7504 	 */
7505 	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
7506 	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
7507 		ev.store_hint = 0x00;
7508 	else
7509 		ev.store_hint = persistent;
7510 
7511 	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
7512 	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
7513 	ev.key.type = csrk->type;
7514 	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
7515 
7516 	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
7517 }
7518 
7519 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
7520 			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
7521 			 u16 max_interval, u16 latency, u16 timeout)
7522 {
7523 	struct mgmt_ev_new_conn_param ev;
7524 
7525 	if (!hci_is_identity_address(bdaddr, bdaddr_type))
7526 		return;
7527 
7528 	memset(&ev, 0, sizeof(ev));
7529 	bacpy(&ev.addr.bdaddr, bdaddr);
7530 	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
7531 	ev.store_hint = store_hint;
7532 	ev.min_interval = cpu_to_le16(min_interval);
7533 	ev.max_interval = cpu_to_le16(max_interval);
7534 	ev.latency = cpu_to_le16(latency);
7535 	ev.timeout = cpu_to_le16(timeout);
7536 
7537 	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
7538 }
7539 
/* Emit a Device Connected event, including any EIR/AD data known for
 * the remote device.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	/* NOTE(review): buf must hold the event header plus all appended
	 * EIR data; presumably 512 bytes covers the maximum
	 * le_adv_data_len and name_len — verify against callers.
	 */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		/* BR/EDR: append the complete name and, when set, the
		 * class of device.
		 */
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}
7576 
7577 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
7578 {
7579 	struct sock **sk = data;
7580 
7581 	cmd->cmd_complete(cmd, 0);
7582 
7583 	*sk = cmd->sk;
7584 	sock_hold(*sk);
7585 
7586 	mgmt_pending_remove(cmd);
7587 }
7588 
7589 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
7590 {
7591 	struct hci_dev *hdev = data;
7592 	struct mgmt_cp_unpair_device *cp = cmd->param;
7593 
7594 	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
7595 
7596 	cmd->cmd_complete(cmd, 0);
7597 	mgmt_pending_remove(cmd);
7598 }
7599 
7600 bool mgmt_powering_down(struct hci_dev *hdev)
7601 {
7602 	struct mgmt_pending_cmd *cmd;
7603 	struct mgmt_mode *cp;
7604 
7605 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7606 	if (!cmd)
7607 		return false;
7608 
7609 	cp = cmd->param;
7610 	if (!cp->val)
7611 		return true;
7612 
7613 	return false;
7614 }
7615 
/* Notify userspace of a device disconnection.
 *
 * Completes any pending Disconnect command (skipping its socket when
 * broadcasting the event) and any pending Unpair Device commands. If
 * a power down is in progress and this was the last connection, the
 * queued power off work is run immediately.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only send the event for connections userspace was told about */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp stores the issuing command's socket (with a
	 * held reference) in sk so it can be skipped below.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
7651 
7652 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
7653 			    u8 link_type, u8 addr_type, u8 status)
7654 {
7655 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
7656 	struct mgmt_cp_disconnect *cp;
7657 	struct mgmt_pending_cmd *cmd;
7658 
7659 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
7660 			     hdev);
7661 
7662 	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
7663 	if (!cmd)
7664 		return;
7665 
7666 	cp = cmd->param;
7667 
7668 	if (bacmp(bdaddr, &cp->addr.bdaddr))
7669 		return;
7670 
7671 	if (cp->addr.type != bdaddr_type)
7672 		return;
7673 
7674 	cmd->cmd_complete(cmd, mgmt_status(status));
7675 	mgmt_pending_remove(cmd);
7676 }
7677 
7678 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7679 			 u8 addr_type, u8 status)
7680 {
7681 	struct mgmt_ev_connect_failed ev;
7682 
7683 	/* The connection is still in hci_conn_hash so test for 1
7684 	 * instead of 0 to know if this is the last one.
7685 	 */
7686 	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
7687 		cancel_delayed_work(&hdev->power_off);
7688 		queue_work(hdev->req_workqueue, &hdev->power_off.work);
7689 	}
7690 
7691 	bacpy(&ev.addr.bdaddr, bdaddr);
7692 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7693 	ev.status = mgmt_status(status);
7694 
7695 	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
7696 }
7697 
7698 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
7699 {
7700 	struct mgmt_ev_pin_code_request ev;
7701 
7702 	bacpy(&ev.addr.bdaddr, bdaddr);
7703 	ev.addr.type = BDADDR_BREDR;
7704 	ev.secure = secure;
7705 
7706 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
7707 }
7708 
7709 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7710 				  u8 status)
7711 {
7712 	struct mgmt_pending_cmd *cmd;
7713 
7714 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
7715 	if (!cmd)
7716 		return;
7717 
7718 	cmd->cmd_complete(cmd, mgmt_status(status));
7719 	mgmt_pending_remove(cmd);
7720 }
7721 
7722 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7723 				      u8 status)
7724 {
7725 	struct mgmt_pending_cmd *cmd;
7726 
7727 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
7728 	if (!cmd)
7729 		return;
7730 
7731 	cmd->cmd_complete(cmd, mgmt_status(status));
7732 	mgmt_pending_remove(cmd);
7733 }
7734 
7735 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7736 			      u8 link_type, u8 addr_type, u32 value,
7737 			      u8 confirm_hint)
7738 {
7739 	struct mgmt_ev_user_confirm_request ev;
7740 
7741 	BT_DBG("%s", hdev->name);
7742 
7743 	bacpy(&ev.addr.bdaddr, bdaddr);
7744 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7745 	ev.confirm_hint = confirm_hint;
7746 	ev.value = cpu_to_le32(value);
7747 
7748 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
7749 			  NULL);
7750 }
7751 
7752 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7753 			      u8 link_type, u8 addr_type)
7754 {
7755 	struct mgmt_ev_user_passkey_request ev;
7756 
7757 	BT_DBG("%s", hdev->name);
7758 
7759 	bacpy(&ev.addr.bdaddr, bdaddr);
7760 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7761 
7762 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
7763 			  NULL);
7764 }
7765 
7766 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7767 				      u8 link_type, u8 addr_type, u8 status,
7768 				      u8 opcode)
7769 {
7770 	struct mgmt_pending_cmd *cmd;
7771 
7772 	cmd = pending_find(opcode, hdev);
7773 	if (!cmd)
7774 		return -ENOENT;
7775 
7776 	cmd->cmd_complete(cmd, mgmt_status(status));
7777 	mgmt_pending_remove(cmd);
7778 
7779 	return 0;
7780 }
7781 
7782 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7783 				     u8 link_type, u8 addr_type, u8 status)
7784 {
7785 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7786 					  status, MGMT_OP_USER_CONFIRM_REPLY);
7787 }
7788 
7789 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7790 					 u8 link_type, u8 addr_type, u8 status)
7791 {
7792 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7793 					  status,
7794 					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
7795 }
7796 
7797 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7798 				     u8 link_type, u8 addr_type, u8 status)
7799 {
7800 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7801 					  status, MGMT_OP_USER_PASSKEY_REPLY);
7802 }
7803 
7804 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7805 					 u8 link_type, u8 addr_type, u8 status)
7806 {
7807 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7808 					  status,
7809 					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
7810 }
7811 
7812 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
7813 			     u8 link_type, u8 addr_type, u32 passkey,
7814 			     u8 entered)
7815 {
7816 	struct mgmt_ev_passkey_notify ev;
7817 
7818 	BT_DBG("%s", hdev->name);
7819 
7820 	bacpy(&ev.addr.bdaddr, bdaddr);
7821 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7822 	ev.passkey = __cpu_to_le32(passkey);
7823 	ev.entered = entered;
7824 
7825 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
7826 }
7827 
7828 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
7829 {
7830 	struct mgmt_ev_auth_failed ev;
7831 	struct mgmt_pending_cmd *cmd;
7832 	u8 status = mgmt_status(hci_status);
7833 
7834 	bacpy(&ev.addr.bdaddr, &conn->dst);
7835 	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7836 	ev.status = status;
7837 
7838 	cmd = find_pairing(conn);
7839 
7840 	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
7841 		    cmd ? cmd->sk : NULL);
7842 
7843 	if (cmd) {
7844 		cmd->cmd_complete(cmd, status);
7845 		mgmt_pending_remove(cmd);
7846 	}
7847 }
7848 
/* Handle completion of an auth enable (link security) change.
 *
 * On failure, fail all pending Set Link Security commands with the
 * translated error. On success, sync HCI_LINK_SECURITY with the
 * controller's HCI_AUTH state, complete the pending commands and emit
 * New Settings if the flag actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* changed is true only when the flag's value actually flipped */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* settings_rsp holds a reference on the first matching socket */
	if (match.sk)
		sock_put(match.sk);
}
7875 
7876 static void clear_eir(struct hci_request *req)
7877 {
7878 	struct hci_dev *hdev = req->hdev;
7879 	struct hci_cp_write_eir cp;
7880 
7881 	if (!lmp_ext_inq_capable(hdev))
7882 		return;
7883 
7884 	memset(hdev->eir, 0, sizeof(hdev->eir));
7885 
7886 	memset(&cp, 0, sizeof(cp));
7887 
7888 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
7889 }
7890 
/* Handle completion of a Secure Simple Pairing mode change.
 *
 * Syncs the HCI_SSP_ENABLED (and dependent HCI_HS_ENABLED) flags with
 * the requested mode, completes pending Set SSP commands, emits New
 * Settings when something changed, and finally updates or clears the
 * EIR data to match the new SSP state.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* A failed enable: clear the SSP (and HS) flags again
		 * and announce the resulting settings.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		/* Disabling SSP also takes High Speed support down with it */
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* settings_rsp holds a reference on the first matching socket */
	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
7943 
7944 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
7945 {
7946 	struct cmd_lookup *match = data;
7947 
7948 	if (match->sk == NULL) {
7949 		match->sk = cmd->sk;
7950 		sock_hold(match->sk);
7951 	}
7952 }
7953 
/* Handle completion of a Class of Device update.
 *
 * Locates the socket of whichever command (Set Device Class, Add UUID
 * or Remove UUID) triggered the write and, on success, broadcasts the
 * new class of device value.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* sk_lookup grabs (with a held reference) the first matching
	 * command's socket; the search order below is deliberate.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, NULL);

	if (match.sk)
		sock_put(match.sk);
}
7970 
/* Handle completion of a local name change.
 *
 * When no Set Local Name command is pending the change was not mgmt
 * initiated, so the new name is stored in hdev->dev_name; if it
 * happened as part of powering on, no event is emitted at all.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* Keep the stored name in sync for non-mgmt changes */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   cmd ? cmd->sk : NULL);
}
7997 
7998 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7999 {
8000 	int i;
8001 
8002 	for (i = 0; i < uuid_count; i++) {
8003 		if (!memcmp(uuid, uuids[i], 16))
8004 			return true;
8005 	}
8006 
8007 	return false;
8008 }
8009 
/* Check whether any of the given UUIDs occur in an EIR data blob.
 *
 * 16-bit and 32-bit UUIDs found in the EIR fields are expanded to
 * full 128-bit form using the Bluetooth base UUID before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero length field terminates the EIR data */
		if (field_len == 0)
			break;

		/* Stop on a field that would run past the buffer */
		if (eir_len - parsed < field_len + 1)
			break;

		/* eir[1] is the field type; field data starts at eir[2] */
		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				/* UUIDs are little endian in EIR data */
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field: length byte plus field_len bytes */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
8064 
/* Queue a delayed restart of the current LE scan.
 *
 * Used with controllers doing strict duplicate filtering in order to
 * get fresh advertising reports for already-seen devices.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Skip the restart when it would only fire after the current
	 * scan window (scan_start + scan_duration) has already ended.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
8079 
/* Apply the Start Service Discovery filter (RSSI threshold and UUID
 * list) to a single scan result. Returns true when the result should
 * be reported to userspace.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped. Both the advertising data
		 * and the scan response are searched.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
8124 
/* Report a discovered device to userspace as a Device Found event.
 *
 * The event carries the EIR/advertising data, optionally a
 * synthesized Class of Device field, and the scan response data
 * appended after the EIR data. Results are suppressed when no
 * kernel-initiated discovery is active, and filtered when service
 * discovery is in use.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Synthesize a Class of Device field unless one is already present */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
8193 
8194 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8195 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
8196 {
8197 	struct mgmt_ev_device_found *ev;
8198 	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
8199 	u16 eir_len;
8200 
8201 	ev = (struct mgmt_ev_device_found *) buf;
8202 
8203 	memset(buf, 0, sizeof(buf));
8204 
8205 	bacpy(&ev->addr.bdaddr, bdaddr);
8206 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
8207 	ev->rssi = rssi;
8208 
8209 	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
8210 				  name_len);
8211 
8212 	ev->eir_len = cpu_to_le16(eir_len);
8213 
8214 	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
8215 }
8216 
8217 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
8218 {
8219 	struct mgmt_ev_discovering ev;
8220 
8221 	BT_DBG("%s discovering %u", hdev->name, discovering);
8222 
8223 	memset(&ev, 0, sizeof(ev));
8224 	ev.type = hdev->discovery.type;
8225 	ev.discovering = discovering;
8226 
8227 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
8228 }
8229 
/* HCI request callback for re-enabling advertising; nothing to do
 * beyond logging the status.
 */
static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}
8234 
8235 void mgmt_reenable_advertising(struct hci_dev *hdev)
8236 {
8237 	struct hci_request req;
8238 
8239 	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
8240 	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
8241 		return;
8242 
8243 	hci_req_init(&req, hdev);
8244 	enable_advertising(&req);
8245 	hci_req_run(&req, adv_enable_complete);
8246 }
8247 
/* Registration data for the mgmt control channel: the command handler
 * table and the per-hdev init callback.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
8254 
/* Register the management interface's control channel */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
8259 
/* Unregister the management interface's control channel */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
8264