xref: /linux/net/bluetooth/mgmt.c (revision 3ce095c16263630dde46d6051854073edaacf3d7)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 
40 #define MGMT_VERSION	1
41 #define MGMT_REVISION	9
42 
/* Opcodes that a trusted (privileged) management socket may issue.
 * This list is reported verbatim by the Read Management Supported
 * Commands handler (read_commands), so the order is part of the
 * userspace-visible reply and must not be changed casually.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
};
106 
/* Events that a trusted (privileged) management socket may receive.
 * Reported verbatim by read_commands alongside mgmt_commands.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
};
143 
/* Subset of commands permitted on untrusted (non-privileged) sockets:
 * read-only discovery of controllers and their configuration.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
};
151 
/* Subset of events delivered to untrusted (non-privileged) sockets:
 * index and configuration change notifications only.
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
};
164 
165 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
166 
167 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
168 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
169 
170 /* HCI to MGMT error code conversion table */
171 static u8 mgmt_status_table[] = {
172 	MGMT_STATUS_SUCCESS,
173 	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
174 	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
175 	MGMT_STATUS_FAILED,		/* Hardware Failure */
176 	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
177 	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
178 	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
179 	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
180 	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
181 	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
182 	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
183 	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
184 	MGMT_STATUS_BUSY,		/* Command Disallowed */
185 	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
186 	MGMT_STATUS_REJECTED,		/* Rejected Security */
187 	MGMT_STATUS_REJECTED,		/* Rejected Personal */
188 	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
189 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
190 	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
191 	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
192 	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
193 	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
194 	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
195 	MGMT_STATUS_BUSY,		/* Repeated Attempts */
196 	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
197 	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
198 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
199 	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
200 	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
201 	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
202 	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
203 	MGMT_STATUS_FAILED,		/* Unspecified Error */
204 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
205 	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
206 	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
207 	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
208 	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
209 	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
210 	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
211 	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
212 	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
213 	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
214 	MGMT_STATUS_FAILED,		/* Transaction Collision */
215 	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
216 	MGMT_STATUS_REJECTED,		/* QoS Rejected */
217 	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
218 	MGMT_STATUS_REJECTED,		/* Insufficient Security */
219 	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
220 	MGMT_STATUS_BUSY,		/* Role Switch Pending */
221 	MGMT_STATUS_FAILED,		/* Slot Violation */
222 	MGMT_STATUS_FAILED,		/* Role Switch Failed */
223 	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
224 	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
225 	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
226 	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
227 	MGMT_STATUS_BUSY,		/* Controller Busy */
228 	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
229 	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
230 	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
231 	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
232 	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
233 };
234 
235 static u8 mgmt_status(u8 hci_status)
236 {
237 	if (hci_status < ARRAY_SIZE(mgmt_status_table))
238 		return mgmt_status_table[hci_status];
239 
240 	return MGMT_STATUS_FAILED;
241 }
242 
243 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
244 			    u16 len, int flag)
245 {
246 	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
247 			       flag, NULL);
248 }
249 
250 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
251 			      u16 len, int flag, struct sock *skip_sk)
252 {
253 	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
254 			       flag, skip_sk);
255 }
256 
257 static int mgmt_generic_event(u16 event, struct hci_dev *hdev, void *data,
258 			      u16 len, struct sock *skip_sk)
259 {
260 	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
261 			       HCI_MGMT_GENERIC_EVENTS, skip_sk);
262 }
263 
264 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
265 		      struct sock *skip_sk)
266 {
267 	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
268 			       HCI_SOCK_TRUSTED, skip_sk);
269 }
270 
271 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
272 			u16 data_len)
273 {
274 	struct mgmt_rp_read_version rp;
275 
276 	BT_DBG("sock %p", sk);
277 
278 	rp.version = MGMT_VERSION;
279 	rp.revision = cpu_to_le16(MGMT_REVISION);
280 
281 	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
282 				 &rp, sizeof(rp));
283 }
284 
285 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
286 			 u16 data_len)
287 {
288 	struct mgmt_rp_read_commands *rp;
289 	u16 num_commands, num_events;
290 	size_t rp_size;
291 	int i, err;
292 
293 	BT_DBG("sock %p", sk);
294 
295 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
296 		num_commands = ARRAY_SIZE(mgmt_commands);
297 		num_events = ARRAY_SIZE(mgmt_events);
298 	} else {
299 		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
300 		num_events = ARRAY_SIZE(mgmt_untrusted_events);
301 	}
302 
303 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
304 
305 	rp = kmalloc(rp_size, GFP_KERNEL);
306 	if (!rp)
307 		return -ENOMEM;
308 
309 	rp->num_commands = cpu_to_le16(num_commands);
310 	rp->num_events = cpu_to_le16(num_events);
311 
312 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
313 		__le16 *opcode = rp->opcodes;
314 
315 		for (i = 0; i < num_commands; i++, opcode++)
316 			put_unaligned_le16(mgmt_commands[i], opcode);
317 
318 		for (i = 0; i < num_events; i++, opcode++)
319 			put_unaligned_le16(mgmt_events[i], opcode);
320 	} else {
321 		__le16 *opcode = rp->opcodes;
322 
323 		for (i = 0; i < num_commands; i++, opcode++)
324 			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
325 
326 		for (i = 0; i < num_events; i++, opcode++)
327 			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
328 	}
329 
330 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
331 				rp, rp_size);
332 	kfree(rp);
333 
334 	return err;
335 }
336 
/* Handler for MGMT_OP_READ_INDEX_LIST: report the indices of all
 * configured BR/EDR controllers. Runs under hci_dev_list_lock for
 * a consistent snapshot of the device list.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count candidates to size the reply buffer. The
	 * fill pass below applies additional filters, so this can only
	 * over-count; the buffer can never end up too small.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC because we allocate while holding the read lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in indices, skipping devices that are still
	 * being set up or configured, or are claimed by a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the final (possibly smaller) count */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
396 
/* Handler for MGMT_OP_READ_UNCONF_INDEX_LIST: report the indices of
 * all unconfigured BR/EDR controllers. Mirrors read_index_list but
 * selects devices that still have the HCI_UNCONFIGURED flag set.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count candidates to size the reply buffer (may
	 * over-count relative to the filtered fill pass below).
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC because we allocate while holding the read lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in indices, skipping devices in setup/config
	 * or claimed by a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the final count */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
456 
/* Handler for MGMT_OP_READ_EXT_INDEX_LIST: report all BR/EDR and AMP
 * controllers together with a per-entry type (0x00 configured BR/EDR,
 * 0x01 unconfigured BR/EDR, 0x02 AMP) and bus information.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count candidates to size the reply buffer (may
	 * over-count relative to the filtered fill pass below).
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR || d->dev_type == HCI_AMP)
			count++;
	}

	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
	/* GFP_ATOMIC because we allocate while holding the read lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in entries, skipping devices in setup/config
	 * or claimed by a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		BT_DBG("Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the final count */
	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
532 
533 static bool is_configured(struct hci_dev *hdev)
534 {
535 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
536 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
537 		return false;
538 
539 	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
540 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
541 		return false;
542 
543 	return true;
544 }
545 
546 static __le32 get_missing_options(struct hci_dev *hdev)
547 {
548 	u32 options = 0;
549 
550 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
551 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
552 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
553 
554 	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
555 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
556 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
557 
558 	return cpu_to_le32(options);
559 }
560 
561 static int new_options(struct hci_dev *hdev, struct sock *skip)
562 {
563 	__le32 options = get_missing_options(hdev);
564 
565 	return mgmt_generic_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
566 				  sizeof(options), skip);
567 }
568 
569 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
570 {
571 	__le32 options = get_missing_options(hdev);
572 
573 	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
574 				 sizeof(options));
575 }
576 
577 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
578 			    void *data, u16 data_len)
579 {
580 	struct mgmt_rp_read_config_info rp;
581 	u32 options = 0;
582 
583 	BT_DBG("sock %p %s", sk, hdev->name);
584 
585 	hci_dev_lock(hdev);
586 
587 	memset(&rp, 0, sizeof(rp));
588 	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
589 
590 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
591 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
592 
593 	if (hdev->set_bdaddr)
594 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
595 
596 	rp.supported_options = cpu_to_le32(options);
597 	rp.missing_options = get_missing_options(hdev);
598 
599 	hci_dev_unlock(hdev);
600 
601 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
602 				 &rp, sizeof(rp));
603 }
604 
605 static u32 get_supported_settings(struct hci_dev *hdev)
606 {
607 	u32 settings = 0;
608 
609 	settings |= MGMT_SETTING_POWERED;
610 	settings |= MGMT_SETTING_BONDABLE;
611 	settings |= MGMT_SETTING_DEBUG_KEYS;
612 	settings |= MGMT_SETTING_CONNECTABLE;
613 	settings |= MGMT_SETTING_DISCOVERABLE;
614 
615 	if (lmp_bredr_capable(hdev)) {
616 		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
617 			settings |= MGMT_SETTING_FAST_CONNECTABLE;
618 		settings |= MGMT_SETTING_BREDR;
619 		settings |= MGMT_SETTING_LINK_SECURITY;
620 
621 		if (lmp_ssp_capable(hdev)) {
622 			settings |= MGMT_SETTING_SSP;
623 			settings |= MGMT_SETTING_HS;
624 		}
625 
626 		if (lmp_sc_capable(hdev))
627 			settings |= MGMT_SETTING_SECURE_CONN;
628 	}
629 
630 	if (lmp_le_capable(hdev)) {
631 		settings |= MGMT_SETTING_LE;
632 		settings |= MGMT_SETTING_ADVERTISING;
633 		settings |= MGMT_SETTING_SECURE_CONN;
634 		settings |= MGMT_SETTING_PRIVACY;
635 		settings |= MGMT_SETTING_STATIC_ADDRESS;
636 	}
637 
638 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
639 	    hdev->set_bdaddr)
640 		settings |= MGMT_SETTING_CONFIGURATION;
641 
642 	return settings;
643 }
644 
/* Build the bitmask of settings currently active on this controller,
 * translating the HCI dev flags into their MGMT_SETTING_* equivalents.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	return settings;
}
712 
713 #define PNP_INFO_SVCLASS_ID		0x1200
714 
/* Append an EIR field listing the registered 16-bit service UUIDs to
 * @data (at most @len bytes available). Returns a pointer one past the
 * last byte written; if not all UUIDs fit, the field type is switched
 * from "complete" to "incomplete" (EIR_UUID16_SOME).
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for at least the 2-byte header plus one UUID */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* The 16-bit alias lives at bytes 12-13 of the 128-bit form */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		/* Lazily emit the field header on the first matching UUID;
		 * uuids_start[0] tracks the running field length.
		 */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
756 
/* Append an EIR field listing the registered 32-bit service UUIDs to
 * @data (at most @len bytes available). Returns a pointer one past the
 * last byte written; if not all UUIDs fit, the field type is switched
 * from "complete" to "incomplete" (EIR_UUID32_SOME).
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for at least the 2-byte header plus one UUID */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		/* Lazily emit the field header on the first matching UUID;
		 * uuids_start[0] tracks the running field length.
		 */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* The 32-bit alias lives at bytes 12-15 of the 128-bit form */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
789 
/* Append an EIR field listing the registered 128-bit service UUIDs to
 * @data (at most @len bytes available). Returns a pointer one past the
 * last byte written; if not all UUIDs fit, the field type is switched
 * from "complete" to "incomplete" (EIR_UUID128_SOME).
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for at least the 2-byte header plus one UUID */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		/* Lazily emit the field header on the first matching UUID;
		 * uuids_start[0] tracks the running field length.
		 */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
822 
823 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
824 {
825 	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
826 }
827 
828 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
829 						  struct hci_dev *hdev,
830 						  const void *data)
831 {
832 	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
833 }
834 
835 static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
836 {
837 	u8 ad_len = 0;
838 	size_t name_len;
839 
840 	name_len = strlen(hdev->dev_name);
841 	if (name_len > 0) {
842 		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
843 
844 		if (name_len > max_len) {
845 			name_len = max_len;
846 			ptr[1] = EIR_NAME_SHORT;
847 		} else
848 			ptr[1] = EIR_NAME_COMPLETE;
849 
850 		ptr[0] = name_len + 1;
851 
852 		memcpy(ptr + 2, hdev->dev_name, name_len);
853 
854 		ad_len += (name_len + 2);
855 		ptr += (name_len + 2);
856 	}
857 
858 	return ad_len;
859 }
860 
861 static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
862 {
863 	/* TODO: Set the appropriate entries based on advertising instance flags
864 	 * here once flags other than 0 are supported.
865 	 */
866 	memcpy(ptr, hdev->adv_instance.scan_rsp_data,
867 	       hdev->adv_instance.scan_rsp_len);
868 
869 	return hdev->adv_instance.scan_rsp_len;
870 }
871 
/* Queue an LE Set Scan Response Data command for @instance onto @req,
 * but only if LE is enabled and the data actually differs from what
 * the controller currently has. Also updates the cached copy in hdev.
 */
static void update_scan_rsp_data_for_instance(struct hci_request *req,
					      u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	/* Instance 0 represents the global settings */
	if (instance)
		len = create_instance_scan_rsp_data(hdev, cp.data);
	else
		len = create_default_scan_rsp_data(hdev, cp.data);

	/* Skip the HCI command if nothing changed */
	if (hdev->scan_rsp_data_len == len &&
	    !memcmp(cp.data, hdev->scan_rsp_data, len))
		return;

	/* Cache the full (zero-padded) buffer, not just len bytes, so
	 * the memcmp above stays valid on the next invocation.
	 */
	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
900 
901 static void update_scan_rsp_data(struct hci_request *req)
902 {
903 	struct hci_dev *hdev = req->hdev;
904 	u8 instance;
905 
906 	/* The "Set Advertising" setting supersedes the "Add Advertising"
907 	 * setting. Here we set the scan response data based on which
908 	 * setting was set. When neither apply, default to the global settings,
909 	 * represented by instance "0".
910 	 */
911 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
912 	    !hci_dev_test_flag(hdev, HCI_ADVERTISING))
913 		instance = 0x01;
914 	else
915 		instance = 0x00;
916 
917 	update_scan_rsp_data_for_instance(req, instance);
918 }
919 
920 static u8 get_adv_discov_flags(struct hci_dev *hdev)
921 {
922 	struct mgmt_pending_cmd *cmd;
923 
924 	/* If there's a pending mgmt command the flags will not yet have
925 	 * their final values, so check for this first.
926 	 */
927 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
928 	if (cmd) {
929 		struct mgmt_mode *cp = cmd->param;
930 		if (cp->val == 0x01)
931 			return LE_AD_GENERAL;
932 		else if (cp->val == 0x02)
933 			return LE_AD_LIMITED;
934 	} else {
935 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
936 			return LE_AD_LIMITED;
937 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
938 			return LE_AD_GENERAL;
939 	}
940 
941 	return 0;
942 }
943 
944 static u8 get_current_adv_instance(struct hci_dev *hdev)
945 {
946 	/* The "Set Advertising" setting supersedes the "Add Advertising"
947 	 * setting. Here we set the advertising data based on which
948 	 * setting was set. When neither apply, default to the global settings,
949 	 * represented by instance "0".
950 	 */
951 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
952 	    !hci_dev_test_flag(hdev, HCI_ADVERTISING))
953 		return 0x01;
954 
955 	return 0x00;
956 }
957 
958 static bool get_connectable(struct hci_dev *hdev)
959 {
960 	struct mgmt_pending_cmd *cmd;
961 
962 	/* If there's a pending mgmt command the flag will not yet have
963 	 * it's final value, so check for this first.
964 	 */
965 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
966 	if (cmd) {
967 		struct mgmt_mode *cp = cmd->param;
968 
969 		return cp->val;
970 	}
971 
972 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
973 }
974 
975 static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
976 {
977 	u32 flags;
978 
979 	if (instance > 0x01)
980 		return 0;
981 
982 	if (instance == 0x01)
983 		return hdev->adv_instance.flags;
984 
985 	/* Instance 0 always manages the "Tx Power" and "Flags" fields */
986 	flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
987 
988 	/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting corresponds
989 	 * to the "connectable" instance flag.
990 	 */
991 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
992 		flags |= MGMT_ADV_FLAG_CONNECTABLE;
993 
994 	return flags;
995 }
996 
997 static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
998 {
999 	/* Ignore instance 0 and other unsupported instances */
1000 	if (instance != 0x01)
1001 		return 0;
1002 
1003 	/* TODO: Take into account the "appearance" and "local-name" flags here.
1004 	 * These are currently being ignored as they are not supported.
1005 	 */
1006 	return hdev->adv_instance.scan_rsp_len;
1007 }
1008 
/* Assemble the advertising data for @instance into @ptr: an optional
 * "Flags" field, the instance's own data (for instance != 0), and an
 * optional "Tx Power" field. Returns the number of bytes written.
 */
static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	u32 instance_flags = get_adv_instance_flags(hdev, instance);

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field".
		 */
		if (flags) {
			/* 0x02 = field length (type byte + one flags byte) */
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

	/* Instance 0 has no instance-specific data of its own */
	if (instance) {
		memcpy(ptr, hdev->adv_instance.adv_data,
		       hdev->adv_instance.adv_data_len);

		ad_len += hdev->adv_instance.adv_data_len;
		ptr += hdev->adv_instance.adv_data_len;
	}

	/* Provide Tx Power only if we can provide a valid value for it */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
	    (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
		ptr[0] = 0x02;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8)hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}
1067 
/* Queue an LE Set Advertising Data command for @instance if needed.
 *
 * Does nothing when LE is disabled or when the freshly generated
 * advertising data matches what was last programmed.
 */
static void update_adv_data_for_instance(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_instance_adv_data(hdev, instance, cp.data);

	/* There's nothing to do if the data hasn't changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	/* Cache what is about to be written so later calls can compare */
	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
1093 
1094 static void update_adv_data(struct hci_request *req)
1095 {
1096 	struct hci_dev *hdev = req->hdev;
1097 	u8 instance = get_current_adv_instance(hdev);
1098 
1099 	update_adv_data_for_instance(req, instance);
1100 }
1101 
1102 int mgmt_update_adv_data(struct hci_dev *hdev)
1103 {
1104 	struct hci_request req;
1105 
1106 	hci_req_init(&req, hdev);
1107 	update_adv_data(&req);
1108 
1109 	return hci_req_run(&req, NULL);
1110 }
1111 
/* Assemble the Extended Inquiry Response payload into @data.
 *
 * Appends, in order: the local name (shortened when longer than 48
 * bytes), the inquiry Tx power (if valid), a Device ID record (if a
 * source is set) and the 16/32/128-bit service UUID lists.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		/* Device ID record: type byte plus four LE16 values */
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* UUID lists consume whatever space remains in the EIR buffer */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
1159 
/* Queue a Write Extended Inquiry Response command if the EIR changed.
 *
 * Skipped when the controller is powered off, lacks extended inquiry
 * support, has SSP disabled, or while the service cache is active.
 */
static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	/* Avoid the HCI command when nothing changed */
	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	/* Cache the data we are about to write to the controller */
	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
1188 
1189 static u8 get_service_classes(struct hci_dev *hdev)
1190 {
1191 	struct bt_uuid *uuid;
1192 	u8 val = 0;
1193 
1194 	list_for_each_entry(uuid, &hdev->uuids, list)
1195 		val |= uuid->svc_hint;
1196 
1197 	return val;
1198 }
1199 
1200 static void update_class(struct hci_request *req)
1201 {
1202 	struct hci_dev *hdev = req->hdev;
1203 	u8 cod[3];
1204 
1205 	BT_DBG("%s", hdev->name);
1206 
1207 	if (!hdev_is_powered(hdev))
1208 		return;
1209 
1210 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1211 		return;
1212 
1213 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1214 		return;
1215 
1216 	cod[0] = hdev->minor_class;
1217 	cod[1] = hdev->major_class;
1218 	cod[2] = get_service_classes(hdev);
1219 
1220 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1221 		cod[1] |= 0x20;
1222 
1223 	if (memcmp(cod, hdev->dev_class, 3) == 0)
1224 		return;
1225 
1226 	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1227 }
1228 
1229 static void disable_advertising(struct hci_request *req)
1230 {
1231 	u8 enable = 0x00;
1232 
1233 	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1234 }
1235 
/* Queue the HCI commands needed to (re)enable LE advertising.
 *
 * Re-programs the advertising parameters based on the current
 * advertising instance and its flags, selecting ADV_IND,
 * ADV_SCAN_IND or ADV_NONCONN_IND accordingly. Bails out while an
 * LE connection exists.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u8 instance;
	u32 flags;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* Restart advertising so new parameters take effect */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	instance = get_current_adv_instance(hdev);
	flags = get_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

	/* Scan response data without connectability selects ADV_SCAN_IND */
	if (connectable)
		cp.type = LE_ADV_IND;
	else if (get_adv_instance_scan_rsp_len(hdev, instance))
		cp.type = LE_ADV_SCAN_IND;
	else
		cp.type = LE_ADV_NONCONN_IND;

	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
1292 
/* Delayed work: expire the service cache and sync EIR and class of
 * device with the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Only run once per HCI_SERVICE_CACHE period */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
1313 
/* Delayed work: mark the Resolvable Private Address as expired and,
 * if advertising is active, restart it so a fresh RPA is programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */
	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, NULL);
}
1334 
/* Perform one-time mgmt initialization for @hdev.
 *
 * The HCI_MGMT flag guards against repeated initialization; only the
 * first caller that sets it performs the work below.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
1350 
/* Handle the MGMT Read Controller Information command.
 *
 * Returns a snapshot of address, HCI version, manufacturer, settings,
 * class of device and names, taken under the hdev lock.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1380 
1381 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1382 {
1383 	__le32 settings = cpu_to_le32(get_current_settings(hdev));
1384 
1385 	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1386 				 sizeof(settings));
1387 }
1388 
1389 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1390 {
1391 	BT_DBG("%s status 0x%02x", hdev->name, status);
1392 
1393 	if (hci_conn_count(hdev) == 0) {
1394 		cancel_delayed_work(&hdev->power_off);
1395 		queue_work(hdev->req_workqueue, &hdev->power_off.work);
1396 	}
1397 }
1398 
/* Queue the HCI commands needed to stop any ongoing discovery.
 *
 * Returns true when at least one command was queued and the caller
 * should wait for completion before treating discovery as stopped.
 */
static bool hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		/* Cancel both BR/EDR inquiry and LE scanning if active */
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		return true;

	case DISCOVERY_RESOLVING:
		/* Abort the pending remote name resolution, if any */
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			break;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		return true;

	default:
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req);
			return true;
		}

		break;
	}

	return false;
}
1441 
1442 static void advertising_added(struct sock *sk, struct hci_dev *hdev,
1443 			      u8 instance)
1444 {
1445 	struct mgmt_ev_advertising_added ev;
1446 
1447 	ev.instance = instance;
1448 
1449 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1450 }
1451 
1452 static void advertising_removed(struct sock *sk, struct hci_dev *hdev,
1453 				u8 instance)
1454 {
1455 	struct mgmt_ev_advertising_removed ev;
1456 
1457 	ev.instance = instance;
1458 
1459 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1460 }
1461 
/* Remove the (single supported) advertising instance.
 *
 * Cancels a pending instance timeout, wipes the stored instance,
 * notifies userspace, and disables advertising on a powered
 * controller unless the global advertising setting keeps it on.
 */
static void clear_adv_instance(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		return;

	if (hdev->adv_instance.timeout)
		cancel_delayed_work(&hdev->adv_instance.timeout_exp);

	memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
	advertising_removed(NULL, hdev, 1);
	hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);

	/* Keep advertising if it is globally enabled or the controller
	 * is off; otherwise turn it off now.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_req_init(&req, hdev);
	disable_advertising(&req);
	hci_req_run(&req, NULL);
}
1484 
/* Queue everything needed to quiesce the controller before power off:
 * disable scans and advertising, stop discovery and disconnect or
 * reject every connection based on its state.
 *
 * Returns the result of hci_req_run(); -ENODATA means no command was
 * queued at all.
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (hdev->adv_instance.timeout)
		clear_adv_instance(hdev);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		disable_advertising(&req);

	discov_stopped = hci_stop_discovery(&req);

	/* Tear down every connection with the command matching its state */
	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			/* Outgoing connection still in progress - cancel it */
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			/* Incoming connection pending acceptance - reject it */
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1546 
/* Handle the MGMT Set Powered command.
 *
 * Powers the controller up via the power_on work, or powers it down
 * after cleaning up connections and scanning. Duplicate requests and
 * no-op transitions are answered directly.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* An auto-off in progress can simply be cancelled when powering on */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Already in the requested state - just report the settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1612 
1613 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1614 {
1615 	__le32 ev = cpu_to_le32(get_current_settings(hdev));
1616 
1617 	return mgmt_generic_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1618 				  sizeof(ev), skip);
1619 }
1620 
/* Broadcast the current settings to all mgmt sockets (none skipped). */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1625 
/* Context passed to pending-command iteration callbacks */
struct cmd_lookup {
	struct sock *sk;	/* first responding socket (see settings_rsp) */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1631 
/* Pending-command callback: answer with the current settings, detach
 * the command from its list and free it.
 *
 * The first command's socket is stashed in the lookup (with a held
 * reference) so the caller can skip it when broadcasting afterwards.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1647 
1648 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1649 {
1650 	u8 *status = data;
1651 
1652 	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1653 	mgmt_pending_remove(cmd);
1654 }
1655 
1656 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1657 {
1658 	if (cmd->cmd_complete) {
1659 		u8 *status = data;
1660 
1661 		cmd->cmd_complete(cmd, *status);
1662 		mgmt_pending_remove(cmd);
1663 
1664 		return;
1665 	}
1666 
1667 	cmd_status_rsp(cmd, data);
1668 }
1669 
/* Complete a pending command echoing back its original parameters. */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1675 
/* Complete a pending command returning only the leading address info
 * portion of its parameters.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1681 
1682 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1683 {
1684 	if (!lmp_bredr_capable(hdev))
1685 		return MGMT_STATUS_NOT_SUPPORTED;
1686 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1687 		return MGMT_STATUS_REJECTED;
1688 	else
1689 		return MGMT_STATUS_SUCCESS;
1690 }
1691 
1692 static u8 mgmt_le_support(struct hci_dev *hdev)
1693 {
1694 	if (!lmp_le_capable(hdev))
1695 		return MGMT_STATUS_NOT_SUPPORTED;
1696 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1697 		return MGMT_STATUS_REJECTED;
1698 	else
1699 		return MGMT_STATUS_SUCCESS;
1700 }
1701 
/* Request completion handler for Set Discoverable.
 *
 * On success, updates the HCI_DISCOVERABLE flag, arms the discoverable
 * timeout if one was requested, answers the pending command and
 * refreshes page scan and class of device.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_DISCOVERABLE);

		/* Arm the timeout that was stored by set_discoverable() */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set. Also update page scan based on whitelist
	 * entries.
	 */
	hci_req_init(&req, hdev);
	__hci_update_page_scan(&req);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1759 
/* Handle the MGMT Set Discoverable command.
 *
 * val 0x00 disables, 0x01 enables general and 0x02 enables limited
 * discoverable mode (which requires a timeout). For BR/EDR this
 * programs the IAC LAPs and inquiry scan; for LE-only controllers it
 * only refreshes the advertising data. The discoverable timeout is
 * armed in set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled first */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1924 
/* Adjust page scan activity and type for fast connectable mode.
 *
 * Fast connectable uses interlaced page scanning with a 160 msec
 * interval; otherwise the 1.28 sec standard default applies. HCI
 * commands are only queued when the values actually change.
 */
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	/* Page scan activity commands require at least Bluetooth 1.2 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
1959 
/* Request completion handler for Set Connectable.
 *
 * On success, updates the HCI_CONNECTABLE flag (disabling also drops
 * HCI_DISCOVERABLE), answers the pending command and refreshes page
 * scan, advertising data and background scanning as needed.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool conn_changed, discov_changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
		discov_changed = false;
	} else {
		/* Disabling connectable also disables discoverable */
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (conn_changed || discov_changed) {
		new_settings(hdev, cmd->sk);
		hci_update_page_scan(hdev);
		if (discov_changed)
			mgmt_update_adv_data(hdev);
		hci_update_background_scan(hdev);
	}

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2009 
2010 static int set_connectable_update_settings(struct hci_dev *hdev,
2011 					   struct sock *sk, u8 val)
2012 {
2013 	bool changed = false;
2014 	int err;
2015 
2016 	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
2017 		changed = true;
2018 
2019 	if (val) {
2020 		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
2021 	} else {
2022 		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
2023 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2024 	}
2025 
2026 	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
2027 	if (err < 0)
2028 		return err;
2029 
2030 	if (changed) {
2031 		hci_update_page_scan(hdev);
2032 		hci_update_background_scan(hdev);
2033 		return new_settings(hdev, sk);
2034 	}
2035 
2036 	return 0;
2037 }
2038 
/* Handle the MGMT Set Connectable command.
 *
 * When powered, programs page scan for BR/EDR or refreshes advertising
 * data for LE-only controllers and re-enables advertising if active;
 * the flag updates happen in set_connectable_complete(). When powered
 * off, only the stored settings are updated.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			/* If we don't have any whitelist entries just
			 * disable all scanning. If there are entries
			 * and we had both page and inquiry scanning
			 * enabled then fall back to only page scanning.
			 * Otherwise no changes are needed.
			 */
			if (list_empty(&hdev->whitelist))
				scan = SCAN_DISABLED;
			else if (test_bit(HCI_ISCAN, &hdev->flags))
				scan = SCAN_PAGE;
			else
				goto no_scan_update;

			/* Stop the discoverable timeout since inquiry
			 * scan is being turned off as well.
			 */
			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

no_scan_update:
	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		enable_advertising(&req);

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* An empty request means no HCI change was needed, so
		 * fall back to updating the stored settings directly.
		 */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2135 
/* Handler for MGMT_OP_SET_BONDABLE: toggle the host-side HCI_BONDABLE
 * flag. This is purely host state, so no HCI commands are sent to the
 * controller.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Mode commands only accept 0x00 (off) or 0x01 (on) */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* The test-and-set/clear result tells us whether the flag value
	 * actually changed.
	 */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	/* Only broadcast New Settings if the value really changed */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2167 
/* Handler for MGMT_OP_SET_LINK_SECURITY: enable or disable BR/EDR link
 * level security by writing the HCI authentication enable setting.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Link security is a BR/EDR-only feature */
	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* When powered off, only update the stored flag; the controller
	 * will be configured accordingly when it gets powered on.
	 */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* Only one Set Link Security command may be in flight at a time */
	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already matches the requested state; just respond */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* The mgmt command is completed from the HCI command complete
	 * handling; on send failure drop the pending entry again.
	 */
	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2236 
/* Handler for MGMT_OP_SET_SSP: enable or disable Secure Simple Pairing.
 * Disabling SSP also clears High Speed support, since HS is only usable
 * together with SSP.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: just flip the stored flags; the controller is
	 * programmed on power on.
	 */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			/* HS cannot stay enabled without SSP, so make
			 * sure it is cleared either way and report a
			 * change if either flag flipped.
			 */
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state; just respond */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* When disabling SSP also turn off debug-key mode (cp->val is
	 * 0x00 here, which disables it). Best effort; failure is not
	 * treated as fatal for the SSP change itself.
	 */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2317 
/* Handler for MGMT_OP_SET_HS: enable or disable High Speed (AMP)
 * support. HS is a pure host option, so only flags are changed; it
 * requires SSP to be enabled first.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* HS depends on SSP being enabled */
	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* An in-flight Set SSP command could change the SSP state that
	 * was checked above, so refuse while one is pending.
	 */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		/* Disabling HS is only allowed while powered off */
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2374 
/* HCI request completion callback for the Set LE request built in
 * set_le(). Completes all pending Set LE mgmt commands and, on
 * success, refreshes LE advertising data and background scanning.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	/* On failure, fail all pending Set LE commands with the mapped
	 * mgmt status and skip the settings update.
	 */
	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp leaves a socket reference in match.sk */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		__hci_update_background_scan(&req);
		hci_req_run(&req, NULL);
	}

unlock:
	hci_dev_unlock(hdev);
}
2414 
/* Handler for MGMT_OP_SET_LE: enable or disable LE support by writing
 * the LE Host Supported setting to the controller (and disabling
 * advertising first when turning LE off).
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* If powered off, or the controller's host LE support already
	 * matches the request, only the host flags need adjusting.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Advertising cannot stay enabled without LE */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Set Advertising also touches LE state, so only one of the two
	 * commands may be pending at a time.
	 */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* Zeroed hci_cp means le = 0x00 and simul = 0x00 for disable */
	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop active advertising before turning LE off */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2515 
2516 /* This is a helper function to test for pending mgmt commands that can
2517  * cause CoD or EIR HCI commands. We can only allow one such pending
2518  * mgmt command at a time since otherwise we cannot easily track what
2519  * the current values are, will be, and based on that calculate if a new
2520  * HCI command needs to be sent and if yes with what value.
2521  */
2522 static bool pending_eir_or_class(struct hci_dev *hdev)
2523 {
2524 	struct mgmt_pending_cmd *cmd;
2525 
2526 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2527 		switch (cmd->opcode) {
2528 		case MGMT_OP_ADD_UUID:
2529 		case MGMT_OP_REMOVE_UUID:
2530 		case MGMT_OP_SET_DEV_CLASS:
2531 		case MGMT_OP_SET_POWERED:
2532 			return true;
2533 		}
2534 	}
2535 
2536 	return false;
2537 }
2538 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order. 16-bit and 32-bit UUIDs occupy the last
 * four bytes of this template (see get_uuid_size below).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2543 
2544 static u8 get_uuid_size(const u8 *uuid)
2545 {
2546 	u32 val;
2547 
2548 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2549 		return 128;
2550 
2551 	val = get_unaligned_le32(&uuid[12]);
2552 	if (val > 0xffff)
2553 		return 32;
2554 
2555 	return 16;
2556 }
2557 
2558 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2559 {
2560 	struct mgmt_pending_cmd *cmd;
2561 
2562 	hci_dev_lock(hdev);
2563 
2564 	cmd = pending_find(mgmt_op, hdev);
2565 	if (!cmd)
2566 		goto unlock;
2567 
2568 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2569 			  mgmt_status(status), hdev->dev_class, 3);
2570 
2571 	mgmt_pending_remove(cmd);
2572 
2573 unlock:
2574 	hci_dev_unlock(hdev);
2575 }
2576 
/* HCI request completion callback for Add UUID */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2583 
/* Handler for MGMT_OP_ADD_UUID: register a service UUID and refresh
 * the class of device and EIR data on the controller.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* -ENODATA means the request was empty (no HCI commands
		 * needed), so complete immediately with the current
		 * device class.
		 */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	/* Completion happens from add_uuid_complete */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2641 
2642 static bool enable_service_cache(struct hci_dev *hdev)
2643 {
2644 	if (!hdev_is_powered(hdev))
2645 		return false;
2646 
2647 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2648 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2649 				   CACHE_TIMEOUT);
2650 		return true;
2651 	}
2652 
2653 	return false;
2654 }
2655 
/* HCI request completion callback for Remove UUID */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2662 
/* Handler for MGMT_OP_REMOVE_UUID: remove one service UUID (or all of
 * them when the all-zero UUID is given) and refresh class/EIR data.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The all-zero UUID acts as a wildcard: clear everything */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache got (re)armed, the class/EIR
		 * update is deferred; complete immediately.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* _safe variant since matching entries are deleted while
	 * iterating.
	 */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* Empty request (no HCI commands needed): complete now */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	/* Completion happens from remove_uuid_complete */
	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2741 
/* HCI request completion callback for Set Device Class */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2748 
/* Handler for MGMT_OP_SET_DEV_CLASS: update the major/minor class of
 * device and push the new class (and possibly EIR) to the controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The low two bits of minor and the high three bits of major
	 * must be zero in a valid class of device value.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Powered off: the stored values are enough; the controller is
	 * updated during power on.
	 */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* Drop the lock around the synchronous cancel — presumably the
	 * service_cache work item takes the hdev lock itself; verify
	 * against its implementation.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* Empty request (no HCI commands needed): complete now */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	/* Completion happens from set_class_complete */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2819 
/* Handler for MGMT_OP_LOAD_LINK_KEYS: replace the stored BR/EDR link
 * keys with the list supplied by userspace. All parameters are
 * validated before any existing keys are discarded.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound that keeps expected_len below from overflowing u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must exactly match the declared key count */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every entry up front so the stored keys are only
	 * cleared once the whole request is known to be good.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2901 
2902 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2903 			   u8 addr_type, struct sock *skip_sk)
2904 {
2905 	struct mgmt_ev_device_unpaired ev;
2906 
2907 	bacpy(&ev.addr.bdaddr, bdaddr);
2908 	ev.addr.type = addr_type;
2909 
2910 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2911 			  skip_sk);
2912 }
2913 
/* Handler for MGMT_OP_UNPAIR_DEVICE: remove all keys for a device and
 * optionally disconnect it. When a disconnect is needed the mgmt
 * command completes from the disconnect handling; otherwise it is
 * answered immediately.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	/* The address is echoed back in every reply, success or error */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	} else {
		u8 addr_type;

		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
					       &cp->addr.bdaddr);
		if (conn) {
			/* Defer clearing up the connection parameters
			 * until closing to give a chance of keeping
			 * them if a repairing happens.
			 */
			set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

			/* If disconnection is not requested, then
			 * clear the connection variable so that the
			 * link is not terminated.
			 */
			if (!cp->disconnect)
				conn = NULL;
		}

		/* Map the mgmt address type to the HCI LE address type */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	}

	/* Key removal failed, i.e. there was nothing stored to remove */
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3028 
/* Handler for MGMT_OP_DISCONNECT: terminate the BR/EDR or LE link to
 * the given address. The mgmt command completes when the disconnect
 * finishes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The address is echoed back in every reply, success or error */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one Disconnect command may be pending at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* BT_OPEN/BT_CLOSED connections have no active link to tear down */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3093 
3094 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3095 {
3096 	switch (link_type) {
3097 	case LE_LINK:
3098 		switch (addr_type) {
3099 		case ADDR_LE_DEV_PUBLIC:
3100 			return BDADDR_LE_PUBLIC;
3101 
3102 		default:
3103 			/* Fallback to LE Random address type */
3104 			return BDADDR_LE_RANDOM;
3105 		}
3106 
3107 	default:
3108 		/* Fallback to BR/EDR type */
3109 		return BDADDR_BREDR;
3110 	}
3111 }
3112 
/* Handler for MGMT_OP_GET_CONNECTIONS: report the addresses of all
 * connections that have been announced to mgmt, excluding SCO/eSCO
 * links.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	size_t rp_len;
	int err;
	u16 i;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count mgmt-visible connections to size the reply */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
	rp = kmalloc(rp_len, GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in the entries. SCO/eSCO links are skipped
	 * without incrementing i, so their slot is simply reused by the
	 * next entry.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				rp_len);

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3170 
3171 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3172 				   struct mgmt_cp_pin_code_neg_reply *cp)
3173 {
3174 	struct mgmt_pending_cmd *cmd;
3175 	int err;
3176 
3177 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3178 			       sizeof(*cp));
3179 	if (!cmd)
3180 		return -ENOMEM;
3181 
3182 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3183 			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3184 	if (err < 0)
3185 		mgmt_pending_remove(cmd);
3186 
3187 	return err;
3188 }
3189 
/* Handler for MGMT_OP_PIN_CODE_REPLY: forward a user-supplied PIN code
 * to the controller. A too-short PIN for a high-security request is
 * converted into a negative reply.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16 byte PIN; reject anything
	 * shorter with a negative reply to the controller.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3251 
3252 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3253 			     u16 len)
3254 {
3255 	struct mgmt_cp_set_io_capability *cp = data;
3256 
3257 	BT_DBG("");
3258 
3259 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3260 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3261 					 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3262 
3263 	hci_dev_lock(hdev);
3264 
3265 	hdev->io_capability = cp->io_capability;
3266 
3267 	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3268 	       hdev->io_capability);
3269 
3270 	hci_dev_unlock(hdev);
3271 
3272 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3273 				 NULL, 0);
3274 }
3275 
3276 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3277 {
3278 	struct hci_dev *hdev = conn->hdev;
3279 	struct mgmt_pending_cmd *cmd;
3280 
3281 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3282 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3283 			continue;
3284 
3285 		if (cmd->user_data != conn)
3286 			continue;
3287 
3288 		return cmd;
3289 	}
3290 
3291 	return NULL;
3292 }
3293 
/* Complete a pending Pair Device command with the given mgmt status,
 * detach the pairing callbacks from the connection and drop the
 * references taken when the pairing was initiated.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Drops the reference taken via hci_conn_get() in pair_device() */
	hci_conn_put(conn);

	return err;
}
3322 
3323 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3324 {
3325 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3326 	struct mgmt_pending_cmd *cmd;
3327 
3328 	cmd = find_pairing(conn);
3329 	if (cmd) {
3330 		cmd->cmd_complete(cmd, status);
3331 		mgmt_pending_remove(cmd);
3332 	}
3333 }
3334 
3335 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3336 {
3337 	struct mgmt_pending_cmd *cmd;
3338 
3339 	BT_DBG("status %u", status);
3340 
3341 	cmd = find_pairing(conn);
3342 	if (!cmd) {
3343 		BT_DBG("Unable to find a pending command");
3344 		return;
3345 	}
3346 
3347 	cmd->cmd_complete(cmd, mgmt_status(status));
3348 	mgmt_pending_remove(cmd);
3349 }
3350 
3351 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3352 {
3353 	struct mgmt_pending_cmd *cmd;
3354 
3355 	BT_DBG("status %u", status);
3356 
3357 	if (!status)
3358 		return;
3359 
3360 	cmd = find_pairing(conn);
3361 	if (!cmd) {
3362 		BT_DBG("Unable to find a pending command");
3363 		return;
3364 	}
3365 
3366 	cmd->cmd_complete(cmd, mgmt_status(status));
3367 	mgmt_pending_remove(cmd);
3368 }
3369 
/* Pair Device command handler. Establishes a BR/EDR or LE connection
 * to the given address (unless already paired) and registers a pending
 * command whose completion is driven by the connection/security
 * callbacks installed below.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The response always echoes back the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, HCI_LE_CONN_TIMEOUT,
				      HCI_ROLE_MASTER);
	}

	if (IS_ERR(conn)) {
		int status;

		/* Map the connect error to a mgmt status code */
		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* A non-NULL connect_cfm_cb means another pairing is already in
	 * progress on this connection.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* Reference is released by hci_conn_put() in pairing_complete() */
	cmd->user_data = hci_conn_get(conn);

	/* If the link is already up and secure enough, complete right away */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3503 
/* Cancel Pair Device command handler. Aborts the currently pending
 * Pair Device command, provided the supplied address matches the
 * connection being paired.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* There must be an ongoing Pair Device command to cancel */
	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The given address must match the pending pairing target */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Completes the Pair Device command and drops its references */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3546 
/* Common helper for the user pairing response commands (PIN code
 * negative reply, user confirm reply/neg reply and passkey reply/neg
 * reply). For LE links the reply is routed to SMP directly; for BR/EDR
 * a pending command is registered and the corresponding HCI command is
 * sent to the controller.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	/* The reply only makes sense for an existing connection */
	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing replies are handled by SMP, not via HCI commands */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3616 
3617 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3618 			      void *data, u16 len)
3619 {
3620 	struct mgmt_cp_pin_code_neg_reply *cp = data;
3621 
3622 	BT_DBG("");
3623 
3624 	return user_pairing_resp(sk, hdev, &cp->addr,
3625 				MGMT_OP_PIN_CODE_NEG_REPLY,
3626 				HCI_OP_PIN_CODE_NEG_REPLY, 0);
3627 }
3628 
3629 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3630 			      u16 len)
3631 {
3632 	struct mgmt_cp_user_confirm_reply *cp = data;
3633 
3634 	BT_DBG("");
3635 
3636 	if (len != sizeof(*cp))
3637 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3638 				       MGMT_STATUS_INVALID_PARAMS);
3639 
3640 	return user_pairing_resp(sk, hdev, &cp->addr,
3641 				 MGMT_OP_USER_CONFIRM_REPLY,
3642 				 HCI_OP_USER_CONFIRM_REPLY, 0);
3643 }
3644 
3645 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3646 				  void *data, u16 len)
3647 {
3648 	struct mgmt_cp_user_confirm_neg_reply *cp = data;
3649 
3650 	BT_DBG("");
3651 
3652 	return user_pairing_resp(sk, hdev, &cp->addr,
3653 				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3654 				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3655 }
3656 
3657 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3658 			      u16 len)
3659 {
3660 	struct mgmt_cp_user_passkey_reply *cp = data;
3661 
3662 	BT_DBG("");
3663 
3664 	return user_pairing_resp(sk, hdev, &cp->addr,
3665 				 MGMT_OP_USER_PASSKEY_REPLY,
3666 				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3667 }
3668 
3669 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3670 				  void *data, u16 len)
3671 {
3672 	struct mgmt_cp_user_passkey_neg_reply *cp = data;
3673 
3674 	BT_DBG("");
3675 
3676 	return user_pairing_resp(sk, hdev, &cp->addr,
3677 				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3678 				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3679 }
3680 
3681 static void update_name(struct hci_request *req)
3682 {
3683 	struct hci_dev *hdev = req->hdev;
3684 	struct hci_cp_write_local_name cp;
3685 
3686 	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3687 
3688 	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
3689 }
3690 
3691 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3692 {
3693 	struct mgmt_cp_set_local_name *cp;
3694 	struct mgmt_pending_cmd *cmd;
3695 
3696 	BT_DBG("status 0x%02x", status);
3697 
3698 	hci_dev_lock(hdev);
3699 
3700 	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3701 	if (!cmd)
3702 		goto unlock;
3703 
3704 	cp = cmd->param;
3705 
3706 	if (status)
3707 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3708 			        mgmt_status(status));
3709 	else
3710 		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3711 				  cp, sizeof(*cp));
3712 
3713 	mgmt_pending_remove(cmd);
3714 
3715 unlock:
3716 	hci_dev_unlock(hdev);
3717 }
3718 
/* Set Local Name command handler. Stores the new device and short
 * name and, when the controller is powered, pushes the new name out
 * via HCI (local name/EIR for BR/EDR, scan response data for LE).
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* When powered off there is nothing to send to the controller;
	 * just store the name and notify the other mgmt sockets.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev,
					 data, len, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3787 
/* Completion handler for the HCI Read Local OOB (Ext) Data command.
 * Converts the controller response into a mgmt Read Local OOB Data
 * response. For the non-extended variant only the P-192 values are
 * available and the reply is shrunk accordingly.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
				         u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		/* Guard against a truncated controller response */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* No P-256 values available; trim them from the reply */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		/* Guard against a truncated controller response */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}
3846 
/* Read Local OOB Data command handler. Queries the controller for its
 * out-of-band pairing data, using the extended variant when BR/EDR
 * Secure Connections is enabled.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one Read Local OOB Data command may be in flight */
	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* The extended variant additionally returns the P-256 values */
	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
	else
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_data_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3897 
/* Add Remote OOB Data command handler. Stores out-of-band pairing
 * data for a remote device. Two request sizes are accepted: the
 * legacy form carrying only P-192 values (BR/EDR only) and the
 * extended form that additionally carries P-256 values.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	BT_DBG("%s ", hdev->name);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* The legacy P-192 only form is limited to BR/EDR */
		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4004 
4005 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4006 				  void *data, u16 len)
4007 {
4008 	struct mgmt_cp_remove_remote_oob_data *cp = data;
4009 	u8 status;
4010 	int err;
4011 
4012 	BT_DBG("%s", hdev->name);
4013 
4014 	if (cp->addr.type != BDADDR_BREDR)
4015 		return mgmt_cmd_complete(sk, hdev->id,
4016 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4017 					 MGMT_STATUS_INVALID_PARAMS,
4018 					 &cp->addr, sizeof(cp->addr));
4019 
4020 	hci_dev_lock(hdev);
4021 
4022 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4023 		hci_remote_oob_data_clear(hdev);
4024 		status = MGMT_STATUS_SUCCESS;
4025 		goto done;
4026 	}
4027 
4028 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
4029 	if (err < 0)
4030 		status = MGMT_STATUS_INVALID_PARAMS;
4031 	else
4032 		status = MGMT_STATUS_SUCCESS;
4033 
4034 done:
4035 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4036 				status, &cp->addr, sizeof(cp->addr));
4037 
4038 	hci_dev_unlock(hdev);
4039 	return err;
4040 }
4041 
4042 static bool trigger_bredr_inquiry(struct hci_request *req, u8 *status)
4043 {
4044 	struct hci_dev *hdev = req->hdev;
4045 	struct hci_cp_inquiry cp;
4046 	/* General inquiry access code (GIAC) */
4047 	u8 lap[3] = { 0x33, 0x8b, 0x9e };
4048 
4049 	*status = mgmt_bredr_support(hdev);
4050 	if (*status)
4051 		return false;
4052 
4053 	if (hci_dev_test_flag(hdev, HCI_INQUIRY)) {
4054 		*status = MGMT_STATUS_BUSY;
4055 		return false;
4056 	}
4057 
4058 	hci_inquiry_cache_flush(hdev);
4059 
4060 	memset(&cp, 0, sizeof(cp));
4061 	memcpy(&cp.lap, lap, sizeof(cp.lap));
4062 	cp.length = DISCOV_BREDR_INQUIRY_LEN;
4063 
4064 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
4065 
4066 	return true;
4067 }
4068 
/* Append the HCI commands for starting an active LE scan to the
 * request. Returns false and sets *status when scanning cannot be
 * started (LE unsupported, directed advertising connection attempt in
 * progress, or no usable random address).
 */
static bool trigger_le_scan(struct hci_request *req, u16 interval, u8 *status)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	u8 own_addr_type;
	int err;

	*status = mgmt_le_support(hdev);
	if (*status)
		return false;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
		/* Don't let discovery abort an outgoing connection attempt
		 * that's using directed advertising.
		 */
		if (hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
			*status = MGMT_STATUS_REJECTED;
			return false;
		}

		disable_advertising(req);
	}

	/* If controller is scanning, it means the background scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req);

	/* All active scans will be done with either a resolvable private
	 * address (when privacy feature has been enabled) or non-resolvable
	 * private address.
	 */
	err = hci_update_random_address(req, true, &own_addr_type);
	if (err < 0) {
		*status = MGMT_STATUS_FAILED;
		return false;
	}

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_ACTIVE;
	param_cp.interval = cpu_to_le16(interval);
	param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
	param_cp.own_address_type = own_addr_type;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);

	return true;
}
4128 
/* Append the HCI commands needed to start the configured discovery
 * type to the request. Returns false and sets *status on failure.
 * Note the deliberate fall-through from INTERLEAVED to LE when the
 * controller cannot do simultaneous discovery.
 */
static bool trigger_discovery(struct hci_request *req, u8 *status)
{
	struct hci_dev *hdev = req->hdev;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!trigger_bredr_inquiry(req, status))
			return false;
		break;

	case DISCOV_TYPE_INTERLEAVED:
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			/* During simultaneous discovery, we double LE scan
			 * interval. We must leave some time for the controller
			 * to do BR/EDR inquiry.
			 */
			if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT * 2,
					     status))
				return false;

			if (!trigger_bredr_inquiry(req, status))
				return false;

			return true;
		}

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
			*status = MGMT_STATUS_NOT_SUPPORTED;
			return false;
		}
		/* fall through */

	case DISCOV_TYPE_LE:
		if (!trigger_le_scan(req, DISCOV_LE_SCAN_INT, status))
			return false;
		break;

	default:
		*status = MGMT_STATUS_INVALID_PARAMS;
		return false;
	}

	return true;
}
4174 
/* Request completion handler for Start (Service) Discovery. Updates
 * the discovery state and, for LE based discovery, schedules the
 * delayed work that will stop the LE scan again.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	unsigned long timeout;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	if (status) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_FINDING);

	/* If the scan involves LE scan, pick proper timeout to schedule
	 * hdev->le_scan_disable that will stop it.
	 */
	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaving discovery in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		else
			timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;
	case DISCOV_TYPE_BREDR:
		timeout = 0;
		break;
	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
		timeout = 0;
		break;
	}

	if (timeout) {
		/* When service discovery is used and the controller has
		 * a strict duplicate filter, it is important to remember
		 * the start and duration of the scan. This is required
		 * for restarting scanning during the discovery phase.
		 */
		if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
			     &hdev->quirks) &&
		    hdev->discovery.result_filtering) {
			hdev->discovery.scan_start = jiffies;
			hdev->discovery.scan_duration = timeout;
		}

		queue_delayed_work(hdev->workqueue,
				   &hdev->le_scan_disable, timeout);
	}

unlock:
	hci_dev_unlock(hdev);
}
4251 
/* Start Discovery command handler. Validates the current discovery
 * state, resets the result filter and kicks off the HCI request that
 * starts inquiry and/or LE scanning.
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery may run at a time */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4317 
4318 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
4319 					  u8 status)
4320 {
4321 	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
4322 				 cmd->param, 1);
4323 }
4324 
/* Start Service Discovery command handler. Like Start Discovery but
 * additionally configures result filtering by RSSI threshold and a
 * variable length list of service UUIDs appended to the request.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery may run at a time */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* Bound uuid_count so the expected_len computation cannot wrap */
	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		BT_ERR("service_discovery: too big uuid_count value %u",
		       uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The request must carry exactly uuid_count 128-bit UUIDs */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		BT_ERR("service_discovery: expected %u bytes, got %u bytes",
		       expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4433 
/* Completion callback for the HCI request built by stop_discovery().
 * Responds to the pending Stop Discovery command (if one is still
 * queued) and, on success, moves the discovery state machine to
 * DISCOVERY_STOPPED.
 */
static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	if (!status)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hci_dev_unlock(hdev);
}
4453 
/* Stop Discovery command handler. Rejects the request when no discovery
 * is active or when the requested type does not match the running one;
 * otherwise builds an HCI request to stop discovery and defers the
 * response to stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the discovery that is running. */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = generic_cmd_complete;

	hci_req_init(&req, hdev);

	hci_stop_discovery(&req);

	/* Response is deferred to stop_discovery_complete() when the
	 * request was actually submitted.
	 */
	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
		goto unlock;
	}

	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4511 
/* Confirm Name command handler. Updates the name-resolution state of an
 * inquiry cache entry depending on whether user space already knows the
 * remote device name.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Confirming names only makes sense during active discovery. */
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	/* The address must refer to a cache entry whose name is still
	 * unknown.
	 */
	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		/* No remote name resolution needed; drop the entry from
		 * the resolve list.
		 */
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
4553 
4554 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4555 			u16 len)
4556 {
4557 	struct mgmt_cp_block_device *cp = data;
4558 	u8 status;
4559 	int err;
4560 
4561 	BT_DBG("%s", hdev->name);
4562 
4563 	if (!bdaddr_type_is_valid(cp->addr.type))
4564 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4565 					 MGMT_STATUS_INVALID_PARAMS,
4566 					 &cp->addr, sizeof(cp->addr));
4567 
4568 	hci_dev_lock(hdev);
4569 
4570 	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4571 				  cp->addr.type);
4572 	if (err < 0) {
4573 		status = MGMT_STATUS_FAILED;
4574 		goto done;
4575 	}
4576 
4577 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4578 		   sk);
4579 	status = MGMT_STATUS_SUCCESS;
4580 
4581 done:
4582 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4583 				&cp->addr, sizeof(cp->addr));
4584 
4585 	hci_dev_unlock(hdev);
4586 
4587 	return err;
4588 }
4589 
4590 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4591 			  u16 len)
4592 {
4593 	struct mgmt_cp_unblock_device *cp = data;
4594 	u8 status;
4595 	int err;
4596 
4597 	BT_DBG("%s", hdev->name);
4598 
4599 	if (!bdaddr_type_is_valid(cp->addr.type))
4600 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4601 					 MGMT_STATUS_INVALID_PARAMS,
4602 					 &cp->addr, sizeof(cp->addr));
4603 
4604 	hci_dev_lock(hdev);
4605 
4606 	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4607 				  cp->addr.type);
4608 	if (err < 0) {
4609 		status = MGMT_STATUS_INVALID_PARAMS;
4610 		goto done;
4611 	}
4612 
4613 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4614 		   sk);
4615 	status = MGMT_STATUS_SUCCESS;
4616 
4617 done:
4618 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4619 				&cp->addr, sizeof(cp->addr));
4620 
4621 	hci_dev_unlock(hdev);
4622 
4623 	return err;
4624 }
4625 
4626 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4627 			 u16 len)
4628 {
4629 	struct mgmt_cp_set_device_id *cp = data;
4630 	struct hci_request req;
4631 	int err;
4632 	__u16 source;
4633 
4634 	BT_DBG("%s", hdev->name);
4635 
4636 	source = __le16_to_cpu(cp->source);
4637 
4638 	if (source > 0x0002)
4639 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4640 				       MGMT_STATUS_INVALID_PARAMS);
4641 
4642 	hci_dev_lock(hdev);
4643 
4644 	hdev->devid_source = source;
4645 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4646 	hdev->devid_product = __le16_to_cpu(cp->product);
4647 	hdev->devid_version = __le16_to_cpu(cp->version);
4648 
4649 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
4650 				NULL, 0);
4651 
4652 	hci_req_init(&req, hdev);
4653 	update_eir(&req);
4654 	hci_req_run(&req, NULL);
4655 
4656 	hci_dev_unlock(hdev);
4657 
4658 	return err;
4659 }
4660 
/* Request-complete callback for the advertising re-enable issued from
 * set_advertising_complete(); it only logs the resulting status.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	BT_DBG("status %d", status);
}
4666 
/* Completion callback for the HCI request built by set_advertising().
 * Synchronizes the HCI_ADVERTISING mgmt flag with the actual HCI_LE_ADV
 * state, responds to all pending Set Advertising commands and emits a
 * New Settings event. May also re-enable instance advertising.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set Advertising command with the
		 * translated HCI error.
		 */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then enable the advertising instance.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		goto unlock;

	hci_req_init(&req, hdev);

	update_adv_data(&req);
	enable_advertising(&req);

	if (hci_req_run(&req, enable_advertising_instance) < 0)
		BT_ERR("Failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}
4714 
/* Set Advertising command handler. Accepts 0x00 (off), 0x01 (on) and
 * 0x02 (on, connectable). Either toggles the mgmt flags directly and
 * responds immediately, or queues an HCI request whose result is
 * handled in set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* val collapses 0x01/0x02 to "enabled"; the connectable variant
	 * is tracked separately via HCI_ADVERTISING_CONNECTABLE.
	 */
	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Reject while another Set Advertising or Set LE operation is
	 * still in flight.
	 */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting. */
		update_adv_data_for_instance(&req, 0);
		update_scan_rsp_data_for_instance(&req, 0);
		enable_advertising(&req);
	} else {
		disable_advertising(&req);
	}

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4810 
4811 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4812 			      void *data, u16 len)
4813 {
4814 	struct mgmt_cp_set_static_address *cp = data;
4815 	int err;
4816 
4817 	BT_DBG("%s", hdev->name);
4818 
4819 	if (!lmp_le_capable(hdev))
4820 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4821 				       MGMT_STATUS_NOT_SUPPORTED);
4822 
4823 	if (hdev_is_powered(hdev))
4824 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4825 				       MGMT_STATUS_REJECTED);
4826 
4827 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4828 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4829 			return mgmt_cmd_status(sk, hdev->id,
4830 					       MGMT_OP_SET_STATIC_ADDRESS,
4831 					       MGMT_STATUS_INVALID_PARAMS);
4832 
4833 		/* Two most significant bits shall be set */
4834 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4835 			return mgmt_cmd_status(sk, hdev->id,
4836 					       MGMT_OP_SET_STATIC_ADDRESS,
4837 					       MGMT_STATUS_INVALID_PARAMS);
4838 	}
4839 
4840 	hci_dev_lock(hdev);
4841 
4842 	bacpy(&hdev->static_addr, &cp->bdaddr);
4843 
4844 	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
4845 	if (err < 0)
4846 		goto unlock;
4847 
4848 	err = new_settings(hdev, sk);
4849 
4850 unlock:
4851 	hci_dev_unlock(hdev);
4852 	return err;
4853 }
4854 
4855 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4856 			   void *data, u16 len)
4857 {
4858 	struct mgmt_cp_set_scan_params *cp = data;
4859 	__u16 interval, window;
4860 	int err;
4861 
4862 	BT_DBG("%s", hdev->name);
4863 
4864 	if (!lmp_le_capable(hdev))
4865 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4866 				       MGMT_STATUS_NOT_SUPPORTED);
4867 
4868 	interval = __le16_to_cpu(cp->interval);
4869 
4870 	if (interval < 0x0004 || interval > 0x4000)
4871 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4872 				       MGMT_STATUS_INVALID_PARAMS);
4873 
4874 	window = __le16_to_cpu(cp->window);
4875 
4876 	if (window < 0x0004 || window > 0x4000)
4877 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4878 				       MGMT_STATUS_INVALID_PARAMS);
4879 
4880 	if (window > interval)
4881 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4882 				       MGMT_STATUS_INVALID_PARAMS);
4883 
4884 	hci_dev_lock(hdev);
4885 
4886 	hdev->le_scan_interval = interval;
4887 	hdev->le_scan_window = window;
4888 
4889 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
4890 				NULL, 0);
4891 
4892 	/* If background scan is running, restart it so new parameters are
4893 	 * loaded.
4894 	 */
4895 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4896 	    hdev->discovery.state == DISCOVERY_STOPPED) {
4897 		struct hci_request req;
4898 
4899 		hci_req_init(&req, hdev);
4900 
4901 		hci_req_add_le_scan_disable(&req);
4902 		hci_req_add_le_passive_scan(&req);
4903 
4904 		hci_req_run(&req, NULL);
4905 	}
4906 
4907 	hci_dev_unlock(hdev);
4908 
4909 	return err;
4910 }
4911 
/* Completion callback for the HCI request built by
 * set_fast_connectable(). On success it updates the
 * HCI_FAST_CONNECTABLE flag from the pending command's requested value
 * and responds; on failure it sends the translated error status.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			        mgmt_status(status));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4945 
/* Set Fast Connectable command handler. Requires a BR/EDR-enabled
 * controller of at least Bluetooth 1.2. When powered, the page scan
 * parameters are rewritten via an HCI request and the response is sent
 * from fast_connectable_complete(); otherwise only the flag is toggled.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* No change requested: just confirm the current settings. */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* While powered off only the flag is toggled; the controller is
	 * not touched.
	 */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
5010 
/* Completion callback for the HCI request built by set_bredr().
 * Responds to the pending Set BR/EDR command; on failure it rolls back
 * the HCI_BREDR_ENABLED flag that set_bredr() flipped in advance.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5042 
/* Set BR/EDR command handler for dual-mode controllers. Enabling while
 * powered triggers an HCI request (completed in set_bredr_complete());
 * disabling while powered is rejected. While powered off only the mgmt
 * flags are updated.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just confirm the current settings. */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears every flag that only
		 * makes sense with BR/EDR enabled.
		 */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, false);
	__hci_update_page_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5154 
/* Completion callback for the Write SC Support command issued by
 * set_secure_conn(). On success it updates HCI_SC_ENABLED/HCI_SC_ONLY
 * from the pending command's requested value (0x00 off, 0x01 on,
 * 0x02 SC-only) and responds with the new settings.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
			        mgmt_status(status));
		goto remove;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}
5199 
/* Set Secure Connections command handler. Accepts 0x00 (off), 0x01 (on)
 * and 0x02 (SC only). Either toggles the flags directly, or sends a
 * Write SC Support HCI command whose result is handled in
 * sc_enable_complete().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On SC-capable BR/EDR controllers SSP must be enabled first. */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Without a powered, SC-capable, BR/EDR-enabled controller the
	 * flags are toggled directly and no HCI command is sent.
	 */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* No change requested: just confirm the current settings. */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
5287 
/* Set Debug Keys command handler. 0x00 discards debug keys, 0x01 keeps
 * them, 0x02 additionally enables use of debug keys (SSP debug mode on
 * the controller when powered and SSP is enabled).
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Sync the controller's SSP debug mode with HCI_USE_DEBUG_KEYS
	 * when the setting actually changed.
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5334 
/* Set Privacy command handler. Stores (or clears) the local IRK and
 * toggles the HCI_PRIVACY flag. Only allowed on LE-capable controllers
 * that are powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5384 
5385 static bool irk_is_valid(struct mgmt_irk_info *irk)
5386 {
5387 	switch (irk->addr.type) {
5388 	case BDADDR_LE_PUBLIC:
5389 		return true;
5390 
5391 	case BDADDR_LE_RANDOM:
5392 		/* Two most significant bits shall be set */
5393 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5394 			return false;
5395 		return true;
5396 	}
5397 
5398 	return false;
5399 }
5400 
/* Load IRKs command handler. Validates the IRK list length and entries,
 * then replaces the entire stored IRK set with the supplied one.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Largest IRK count for which the full command still fits in a
	 * u16 length.
	 */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		BT_ERR("load_irks: too big irk_count value %u", irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The total command length must match the advertised IRK count
	 * exactly.
	 */
	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	/* Validate every entry before touching the stored IRK list so
	 * the operation is all-or-nothing.
	 */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type;

		if (irk->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5467 
5468 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5469 {
5470 	if (key->master != 0x00 && key->master != 0x01)
5471 		return false;
5472 
5473 	switch (key->addr.type) {
5474 	case BDADDR_LE_PUBLIC:
5475 		return true;
5476 
5477 	case BDADDR_LE_RANDOM:
5478 		/* Two most significant bits shall be set */
5479 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5480 			return false;
5481 		return true;
5482 	}
5483 
5484 	return false;
5485 }
5486 
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound that keeps the expected_len computation below from
	 * overflowing u16.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_ltks: too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must match the declared key count exactly */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	/* Validate all entries up front; a single bad key rejects the
	 * whole command before any state is modified.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* This is a full replace, not a merge */
	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, addr_type, authenticated;

		if (key->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* NOTE(review): there is no break here, so this case
			 * falls through into default and hits "continue" -
			 * debug LTKs are never stored and the two assignments
			 * above are dead.  This looks deliberate (debug keys
			 * should not be persisted) but deserves an explicit
			 * fall-through marker - confirm intent.
			 */
		default:
			/* Unknown key types are silently skipped rather than
			 * failing the whole command.
			 */
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
			    authenticated, key->val, key->enc_size, key->ediv,
			    key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5578 
/* Send the Get Connection Information response and release the connection
 * references taken when the command was queued in get_conn_info().
 */
static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_conn_info rp;
	int err;

	/* cmd->param holds the original mgmt_cp_get_conn_info, which
	 * starts with the address info the response must echo back.
	 */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		/* On failure report explicit "invalid" markers instead of
		 * possibly stale cached values.
		 */
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
				status, &rp, sizeof(rp));

	/* Balance hci_conn_hold()/hci_conn_get() from get_conn_info() */
	hci_conn_drop(conn);
	hci_conn_put(conn);

	return err;
}
5605 
/* Completion handler for the Read RSSI / Read TX Power request issued by
 * get_conn_info() when the cached connection info had expired.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	BT_DBG("status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle.  Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	if (!cp) {
		BT_ERR("invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in conn_info response", handle);
		goto unlock;
	}

	/* Match the pending command queued by get_conn_info() for this
	 * connection; its cmd_complete sends the mgmt response.
	 */
	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5658 
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	/* The response always echoes back the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one Get Connection Information request may be pending per
	 * connection at a time.
	 */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		/* Read RSSI is always queued first; see the matching logic
		 * in conn_info_refresh_complete().
		 */
		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Keep the connection alive until the request completes;
		 * both references are released in conn_info_cmd_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5779 
5780 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5781 {
5782 	struct hci_conn *conn = cmd->user_data;
5783 	struct mgmt_rp_get_clock_info rp;
5784 	struct hci_dev *hdev;
5785 	int err;
5786 
5787 	memset(&rp, 0, sizeof(rp));
5788 	memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
5789 
5790 	if (status)
5791 		goto complete;
5792 
5793 	hdev = hci_dev_get(cmd->index);
5794 	if (hdev) {
5795 		rp.local_clock = cpu_to_le32(hdev->clock);
5796 		hci_dev_put(hdev);
5797 	}
5798 
5799 	if (conn) {
5800 		rp.piconet_clock = cpu_to_le32(conn->clock);
5801 		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5802 	}
5803 
5804 complete:
5805 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
5806 				sizeof(rp));
5807 
5808 	if (conn) {
5809 		hci_conn_drop(conn);
5810 		hci_conn_put(conn);
5811 	}
5812 
5813 	return err;
5814 }
5815 
/* Completion handler for the HCI Read Clock request(s) issued by
 * get_clock_info().
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct hci_cp_read_clock *hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* which != 0x00 means the piconet clock was read and the command
	 * carried a connection handle; otherwise it was the local clock
	 * and no connection is involved.
	 */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	/* Match the pending command queued by get_clock_info() */
	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5847 
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	/* The response always echoes back the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is only defined for BR/EDR */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-wildcard address additionally requests the piconet clock
	 * of the connection to that device.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* which == 0x00 (from the memset) reads the local clock */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* Keep the connection alive until the request completes;
		 * both references are released in clock_info_cmd_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5923 
5924 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5925 {
5926 	struct hci_conn *conn;
5927 
5928 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5929 	if (!conn)
5930 		return false;
5931 
5932 	if (conn->dst_type != type)
5933 		return false;
5934 
5935 	if (conn->state != BT_CONNECTED)
5936 		return false;
5937 
5938 	return true;
5939 }
5940 
/* This function requires the caller holds hdev->lock */
static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;

	/* Creates the params entry if it does not exist yet */
	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever action list (pend_le_conns or
	 * pend_le_reports) the entry may currently be on before
	 * re-queueing it below.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* No action list membership; just refresh scanning state */
		__hci_update_background_scan(req);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		__hci_update_background_scan(req);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue for connection if not already connected */
		if (!is_connected(hdev, addr, addr_type)) {
			list_add(&params->action, &hdev->pend_le_conns);
			__hci_update_background_scan(req);
		}
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}
5982 
5983 static void device_added(struct sock *sk, struct hci_dev *hdev,
5984 			 bdaddr_t *bdaddr, u8 type, u8 action)
5985 {
5986 	struct mgmt_ev_device_added ev;
5987 
5988 	bacpy(&ev.addr.bdaddr, bdaddr);
5989 	ev.addr.type = type;
5990 	ev.action = action;
5991 
5992 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5993 }
5994 
5995 static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5996 {
5997 	struct mgmt_pending_cmd *cmd;
5998 
5999 	BT_DBG("status 0x%02x", status);
6000 
6001 	hci_dev_lock(hdev);
6002 
6003 	cmd = pending_find(MGMT_OP_ADD_DEVICE, hdev);
6004 	if (!cmd)
6005 		goto unlock;
6006 
6007 	cmd->cmd_complete(cmd, mgmt_status(status));
6008 	mgmt_pending_remove(cmd);
6009 
6010 unlock:
6011 	hci_dev_unlock(hdev);
6012 }
6013 
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	/* Address must be of a valid type and not the wildcard address */
	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	/* Valid actions are 0x00-0x02; see the auto_conn mapping below */
	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* BR/EDR devices go on the whitelist; LE devices are handled via
	 * connection parameters further below.
	 */
	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		__hci_update_page_scan(&req);

		goto added;
	}

	if (cp->addr.type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
		goto unlock;
	}

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = hci_req_run(&req, add_device_complete);
	if (err < 0) {
		/* ENODATA means no HCI commands were needed (e.g. if
		 * the adapter is powered off).
		 */
		if (err == -ENODATA)
			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6106 
6107 static void device_removed(struct sock *sk, struct hci_dev *hdev,
6108 			   bdaddr_t *bdaddr, u8 type)
6109 {
6110 	struct mgmt_ev_device_removed ev;
6111 
6112 	bacpy(&ev.addr.bdaddr, bdaddr);
6113 	ev.addr.type = type;
6114 
6115 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
6116 }
6117 
6118 static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6119 {
6120 	struct mgmt_pending_cmd *cmd;
6121 
6122 	BT_DBG("status 0x%02x", status);
6123 
6124 	hci_dev_lock(hdev);
6125 
6126 	cmd = pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
6127 	if (!cmd)
6128 		goto unlock;
6129 
6130 	cmd->cmd_complete(cmd, mgmt_status(status));
6131 	mgmt_pending_remove(cmd);
6132 
6133 unlock:
6134 	hci_dev_unlock(hdev);
6135 }
6136 
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* A specific address removes one device; the wildcard address
	 * (else branch below) removes the entire whitelist and all
	 * non-disabled LE connection parameters.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				/* Device was not on the whitelist */
				err = cmd->cmd_complete(cmd,
							MGMT_STATUS_INVALID_PARAMS);
				mgmt_pending_remove(cmd);
				goto unlock;
			}

			__hci_update_page_scan(&req);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		/* HCI_AUTO_CONN_DISABLED entries are rejected here -
		 * presumably because they were not created through Add
		 * Device (compare the wildcard branch below, which also
		 * skips them); confirm.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		__hci_update_background_scan(&req);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* Wildcard address requires address type 0x00 */
		if (cp->addr.type) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		__hci_update_page_scan(&req);

		/* Disabled entries are preserved */
		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		__hci_update_background_scan(&req);
	}

complete:
	err = hci_req_run(&req, remove_device_complete);
	if (err < 0) {
		/* ENODATA means no HCI commands were needed (e.g. if
		 * the adapter is powered off).
		 */
		if (err == -ENODATA)
			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6263 
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound that keeps the expected_len computation below from
	 * overflowing u16.
	 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must match the declared entry count exactly */
	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	/* Drop entries in the disabled state before loading the new set */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		/* Note: invalid entries are skipped, not treated as a
		 * command failure.
		 */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
6349 
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	/* The option can only be changed while the device is powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);

	/* Only controllers that declare the external-config quirk
	 * support this command.
	 */
	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* changed is true only if the flag actually flipped */
	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the configured state changed as a result, move the
	 * controller between the configured and unconfigured mgmt
	 * indexes.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6405 
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	/* The public address can only be set while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Requires a driver callback for writing the address */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* If the device became fully configured, move it from the
	 * unconfigured to the configured mgmt index and power it up.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6457 
6458 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6459 				  u8 data_len)
6460 {
6461 	eir[eir_len++] = sizeof(type) + data_len;
6462 	eir[eir_len++] = type;
6463 	memcpy(&eir[eir_len], data, data_len);
6464 	eir_len += data_len;
6465 
6466 	return eir_len;
6467 }
6468 
/* Completion handler for the Read Local OOB (Extended) Data request
 * issued by read_local_ssp_oob_req(): build the EIR-formatted response
 * and notify other mgmt sockets interested in OOB data updates.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	if (status) {
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy command variant: only one hash/rand pair */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* Class of Device field (5 bytes) plus one
			 * hash and one rand field (2 + 16 bytes each).
			 */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended variant carrying both 192 and 256 values */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				/* SC-only mode suppresses the 192 values */
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* On error send an empty response with the failure status */
	if (status)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	/* Notify other interested sockets (excluding the requester) of
	 * the fresh OOB data.
	 */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
6579 
6580 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
6581 				  struct mgmt_cp_read_local_oob_ext_data *cp)
6582 {
6583 	struct mgmt_pending_cmd *cmd;
6584 	struct hci_request req;
6585 	int err;
6586 
6587 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
6588 			       cp, sizeof(*cp));
6589 	if (!cmd)
6590 		return -ENOMEM;
6591 
6592 	hci_req_init(&req, hdev);
6593 
6594 	if (bredr_sc_enabled(hdev))
6595 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
6596 	else
6597 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
6598 
6599 	err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
6600 	if (err < 0) {
6601 		mgmt_pending_remove(cmd);
6602 		return err;
6603 	}
6604 
6605 	return 0;
6606 }
6607 
/* Handler for the MGMT Read Local OOB Extended Data command. Builds an
 * EIR-formatted blob of local out-of-band pairing data for either the
 * BR/EDR transport or the LE transports, depending on cp->type.
 *
 * For BR/EDR with SSP enabled the controller must be queried
 * asynchronously (see read_local_ssp_oob_req()); in that case the reply
 * is sent from the request callback and this function returns early.
 * All other cases reply synchronously and, on success, also broadcast
 * an MGMT_EV_LOCAL_OOB_DATA_UPDATED event to other interested sockets.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	BT_DBG("%s", hdev->name);

	/* First pass: validate the request and compute the worst-case
	 * eir_len so the reply buffer can be sized before it is filled.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				/* Class of Device field: 2 header + 3 data */
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				/* LE address + role + SC confirm + SC random
				 * + flags fields (each including its 2-byte
				 * EIR header)
				 */
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: fill in the EIR data, recomputing the actual
	 * length as fields are appended.
	 */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* SSP OOB data lives in the controller; defer the
			 * reply to the asynchronous request path.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick the address the peer will see: the static random
		 * address when forced, when no public address exists, or
		 * when BR/EDR is disabled and a static address is set;
		 * otherwise the public address. addr[6] encodes the
		 * address type (0x01 = random, 0x00 = public).
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* 0x02 = peripheral preferred, 0x01 = central preferred
		 * -- TODO confirm role encoding against the mgmt API docs.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		/* Secure Connections confirm/random values generated above */
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* Opt this socket in for future OOB data update events. */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Broadcast the new OOB data to other subscribed sockets, but
	 * skip the requesting socket itself.
	 */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
6763 
6764 static u32 get_supported_adv_flags(struct hci_dev *hdev)
6765 {
6766 	u32 flags = 0;
6767 
6768 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
6769 	flags |= MGMT_ADV_FLAG_DISCOV;
6770 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
6771 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
6772 
6773 	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID)
6774 		flags |= MGMT_ADV_FLAG_TX_POWER;
6775 
6776 	return flags;
6777 }
6778 
/* Handler for the MGMT Read Advertising Features command. Reports the
 * supported advertising flags, data size limits and currently
 * configured advertising instances.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	bool instance;
	u32 supported_flags;

	BT_DBG("%s", hdev->name);

	/* Advertising is an LE feature; reject on non-LE controllers. */
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	rp_len = sizeof(*rp);

	/* Currently only one instance is supported, so just add 1 to the
	 * response length.
	 */
	instance = hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE);
	if (instance)
		rp_len++;

	/* GFP_ATOMIC: allocation happens while holding the hdev lock. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = 1;

	/* Currently only one instance is supported, so simply return the
	 * current instance number.
	 */
	if (instance) {
		rp->num_instances = 1;
		rp->instance[0] = 1;
	} else {
		rp->num_instances = 0;
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
6837 
6838 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
6839 			      u8 len, bool is_adv_data)
6840 {
6841 	u8 max_len = HCI_MAX_AD_LENGTH;
6842 	int i, cur_len;
6843 	bool flags_managed = false;
6844 	bool tx_power_managed = false;
6845 	u32 flags_params = MGMT_ADV_FLAG_DISCOV | MGMT_ADV_FLAG_LIMITED_DISCOV |
6846 			   MGMT_ADV_FLAG_MANAGED_FLAGS;
6847 
6848 	if (is_adv_data && (adv_flags & flags_params)) {
6849 		flags_managed = true;
6850 		max_len -= 3;
6851 	}
6852 
6853 	if (is_adv_data && (adv_flags & MGMT_ADV_FLAG_TX_POWER)) {
6854 		tx_power_managed = true;
6855 		max_len -= 3;
6856 	}
6857 
6858 	if (len > max_len)
6859 		return false;
6860 
6861 	/* Make sure that the data is correctly formatted. */
6862 	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
6863 		cur_len = data[i];
6864 
6865 		if (flags_managed && data[i + 1] == EIR_FLAGS)
6866 			return false;
6867 
6868 		if (tx_power_managed && data[i + 1] == EIR_TX_POWER)
6869 			return false;
6870 
6871 		/* If the current field length would exceed the total data
6872 		 * length, then it's invalid.
6873 		 */
6874 		if (i + cur_len >= len)
6875 			return false;
6876 	}
6877 
6878 	return true;
6879 }
6880 
/* HCI request callback for Add Advertising. On failure the instance
 * state set up by add_advertising() is rolled back; in either case the
 * pending mgmt command (if still around) is answered and removed.
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_rp_add_advertising rp;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);

	/* Undo the instance registration and notify removal if enabling
	 * advertising failed.
	 */
	if (status) {
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
		memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));
		advertising_removed(cmd ? cmd->sk : NULL, hdev, 1);
	}

	/* The command may have been completed elsewhere already. */
	if (!cmd)
		goto unlock;

	/* Only instance 1 is currently supported. */
	rp.instance = 0x01;

	if (status)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6916 
/* Delayed-work callback fired when the advertising instance's timeout
 * elapses; clears the instance.
 */
static void adv_timeout_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance.timeout_exp.work);

	/* Zero the timeout before clearing the instance — presumably so
	 * cleanup code keyed on a non-zero timeout (see
	 * remove_advertising()) does not try to cancel this work while
	 * it is running. TODO confirm against clear_adv_instance().
	 */
	hdev->adv_instance.timeout = 0;

	hci_dev_lock(hdev);
	clear_adv_instance(hdev);
	hci_dev_unlock(hdev);
}
6928 
/* Handler for the MGMT Add Advertising command. Validates the request,
 * stores the instance data on hdev, arms the optional timeout and, when
 * the controller is powered and not in legacy advertising mode, pushes
 * the data to the controller and enables advertising asynchronously
 * (completed in add_advertising_complete()).
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u32 supported_flags;
	u8 status;
	u16 timeout;
	int err;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);

	/* The current implementation only supports adding one instance and only
	 * a subset of the specified flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	if (cp->instance != 0x01 || (flags & ~supported_flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be honoured while powered off. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* Refuse to race with other operations that touch the
	 * advertising state.
	 */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Validate both the advertising data and the scan response data,
	 * which follow each other in cp->data.
	 */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	INIT_DELAYED_WORK(&hdev->adv_instance.timeout_exp, adv_timeout_expired);

	/* Store the instance parameters and payloads on hdev. */
	hdev->adv_instance.flags = flags;
	hdev->adv_instance.adv_data_len = cp->adv_data_len;
	hdev->adv_instance.scan_rsp_len = cp->scan_rsp_len;

	if (cp->adv_data_len)
		memcpy(hdev->adv_instance.adv_data, cp->data, cp->adv_data_len);

	if (cp->scan_rsp_len)
		memcpy(hdev->adv_instance.scan_rsp_data,
		       cp->data + cp->adv_data_len, cp->scan_rsp_len);

	/* Re-adding the instance replaces any previously armed timeout. */
	if (hdev->adv_instance.timeout)
		cancel_delayed_work(&hdev->adv_instance.timeout_exp);

	hdev->adv_instance.timeout = timeout;

	if (timeout)
		queue_delayed_work(hdev->workqueue,
				   &hdev->adv_instance.timeout_exp,
				   msecs_to_jiffies(timeout * 1000));

	/* Announce the instance only on the first add. */
	if (!hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING_INSTANCE))
		advertising_added(sk, hdev, 1);

	/* If the HCI_ADVERTISING flag is set or the device isn't powered then
	 * we have no HCI communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		rp.instance = 0x01;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	update_adv_data(&req);
	update_scan_rsp_data(&req);
	enable_advertising(&req);

	err = hci_req_run(&req, add_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
7046 
/* HCI request callback for Remove Advertising. The instance state was
 * already cleared synchronously in remove_advertising(), so this only
 * finishes the pending mgmt command.
 */
static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_rp_remove_advertising rp;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	/* A failure status here only means that we failed to disable
	 * advertising. Otherwise, the advertising instance has been removed,
	 * so report success.
	 */
	cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
	if (!cmd)
		goto unlock;

	/* Only instance 1 is currently supported. */
	rp.instance = 1;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
			  &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
7074 
/* Handler for the MGMT Remove Advertising command. Clears the stored
 * instance state immediately and, if the controller is powered and not
 * in legacy advertising mode, disables advertising asynchronously
 * (completed in remove_advertising_complete()).
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_rp_remove_advertising rp;
	int err;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	/* The current implementation only allows modifying instance no 1. A
	 * value of 0 indicates that all instances should be cleared.
	 */
	if (cp->instance > 1)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Refuse to race with other operations that touch the
	 * advertising state.
	 */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to remove when no instance is registered. */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Disarm a pending instance timeout before wiping the state. */
	if (hdev->adv_instance.timeout)
		cancel_delayed_work(&hdev->adv_instance.timeout_exp);

	memset(&hdev->adv_instance, 0, sizeof(hdev->adv_instance));

	advertising_removed(sk, hdev, 1);

	hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);

	/* If the HCI_ADVERTISING flag is set or the device isn't powered then
	 * we have no HCI communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		rp.instance = 1;
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_REMOVE_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);
	disable_advertising(&req);

	err = hci_req_run(&req, remove_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
7149 
/* Dispatch table of mgmt command handlers, indexed by mgmt opcode
 * (entry 0 is unused). Each entry gives the handler, its expected
 * parameter size (a minimum when HCI_MGMT_VAR_LEN is set), and flags
 * selecting commands usable without a controller index
 * (HCI_MGMT_NO_HDEV), on unconfigured controllers
 * (HCI_MGMT_UNCONFIGURED) or from untrusted sockets
 * (HCI_MGMT_UNTRUSTED).
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
};
7238 
/* Announce a newly registered controller to mgmt listeners. BR/EDR
 * controllers get the legacy Index Added (or Unconfigured Index Added)
 * event, and all supported types additionally get the Extended Index
 * Added event carrying type and bus information.
 */
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	/* Raw devices are not exposed through the mgmt interface. */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01; /* unconfigured controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00; /* configured controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02; /* AMP controller */
		break;
	default:
		/* Unknown device types generate no events. */
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
7270 
/* Announce removal of a controller to mgmt listeners, failing any
 * still-pending commands with Invalid Index first. Mirrors
 * mgmt_index_added() for the event selection.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	/* Raw devices are not exposed through the mgmt interface. */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		/* Fail every pending command (opcode 0 matches all). */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01; /* unconfigured controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00; /* configured controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02; /* AMP controller */
		break;
	default:
		/* Unknown device types generate no events. */
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
7305 
7306 /* This function requires the caller holds hdev->lock */
7307 static void restart_le_actions(struct hci_request *req)
7308 {
7309 	struct hci_dev *hdev = req->hdev;
7310 	struct hci_conn_params *p;
7311 
7312 	list_for_each_entry(p, &hdev->le_conn_params, list) {
7313 		/* Needed for AUTO_OFF case where might not "really"
7314 		 * have been powered off.
7315 		 */
7316 		list_del_init(&p->action);
7317 
7318 		switch (p->auto_connect) {
7319 		case HCI_AUTO_CONN_DIRECT:
7320 		case HCI_AUTO_CONN_ALWAYS:
7321 			list_add(&p->action, &hdev->pend_le_conns);
7322 			break;
7323 		case HCI_AUTO_CONN_REPORT:
7324 			list_add(&p->action, &hdev->pend_le_reports);
7325 			break;
7326 		default:
7327 			break;
7328 		}
7329 	}
7330 
7331 	__hci_update_background_scan(req);
7332 }
7333 
/* HCI request callback for the power-on initialization built by
 * powered_update_hci(). Registers SMP on success and completes any
 * pending Set Powered commands with the new settings.
 */
static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	if (!status) {
		/* Register the available SMP channels (BR/EDR and LE) only
		 * when successfully powering on the controller. This late
		 * registration is required so that LE SMP can clearly
		 * decide if the public address or static address is used.
		 */
		smp_register(hdev);
	}

	hci_dev_lock(hdev);

	/* Answer all pending Set Powered commands; settings_rsp also
	 * records one requesting socket in match.sk.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	/* settings_rsp took a reference on the socket it stored. */
	if (match.sk)
		sock_put(match.sk);
}
7360 
/* Build and run the HCI request that brings the controller's state in
 * line with the configured mgmt settings after power on (SSP, Secure
 * Connections, LE host support, advertising, link security and the
 * BR/EDR scan/class/name/EIR settings).
 *
 * Returns the result of hci_req_run(); completion is handled by
 * powered_complete().
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP in the controller if mgmt wants it but the host
	 * feature bit is not yet set.
	 */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
			enable_advertising(&req);

		restart_le_actions(&req);
	}

	/* Sync the controller's authentication setting with the
	 * configured link security setting.
	 */
	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			write_fast_connectable(&req, true);
		else
			write_fast_connectable(&req, false);
		__hci_update_page_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
7433 
/* Called by the core when the controller's power state changes. On
 * power on the post-power-up HCI request is kicked off (in which case
 * pending Set Powered commands complete later from powered_complete());
 * on power off all pending commands are failed and listeners are
 * notified of the new settings.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };
	int err;

	/* Nothing to do for controllers not managed via mgmt. */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return 0;

	if (powered) {
		/* If the init request was queued, powered_complete()
		 * will answer the pending commands later.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	/* Fail every remaining pending command (opcode 0 matches all). */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Report the class of device as cleared while powered off. */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the socket it stored. */
	if (match.sk)
		sock_put(match.sk);

	return err;
}
7480 
7481 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
7482 {
7483 	struct mgmt_pending_cmd *cmd;
7484 	u8 status;
7485 
7486 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7487 	if (!cmd)
7488 		return;
7489 
7490 	if (err == -ERFKILL)
7491 		status = MGMT_STATUS_RFKILLED;
7492 	else
7493 		status = MGMT_STATUS_FAILED;
7494 
7495 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
7496 
7497 	mgmt_pending_remove(cmd);
7498 }
7499 
/* Called when the discoverable timeout fires: clears the discoverable
 * flags, restores page-scan-only mode on BR/EDR, refreshes the class
 * (and advertising data where applicable) and notifies listeners of
 * the new settings.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	hci_req_init(&req, hdev);
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		/* Drop inquiry scan, keep page scan only. */
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		update_adv_data(&req);

	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
7536 
/* Emit a New Link Key event for a freshly created BR/EDR link key.
 * @persistent selects the store hint, i.e. whether userspace should
 * keep the key across reboots.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	/* Zero the whole event (including any padding) before filling
	 * it, since it is copied out to userspace.
	 */
	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
7553 
7554 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
7555 {
7556 	switch (ltk->type) {
7557 	case SMP_LTK:
7558 	case SMP_LTK_SLAVE:
7559 		if (ltk->authenticated)
7560 			return MGMT_LTK_AUTHENTICATED;
7561 		return MGMT_LTK_UNAUTHENTICATED;
7562 	case SMP_LTK_P256:
7563 		if (ltk->authenticated)
7564 			return MGMT_LTK_P256_AUTH;
7565 		return MGMT_LTK_P256_UNAUTH;
7566 	case SMP_LTK_P256_DEBUG:
7567 		return MGMT_LTK_P256_DEBUG;
7568 	}
7569 
7570 	return MGMT_LTK_UNAUTHENTICATED;
7571 }
7572 
/* Emit a New Long Term Key event for a freshly distributed LE LTK.
 * The store hint tells userspace whether the key is worth persisting.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	/* Zero the whole event (including any padding) before filling
	 * it, since it is copied out to userspace.
	 */
	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an indentity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK marks the key distributed by the master role. */
	if (key->type == SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
7610 
/* Emit a New Identity Resolving Key event for an IRK received during
 * SMP pairing.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	/* For identity resolving keys from devices that are already
	 * using a public address or static random address, do not
	 * ask for storing this key. The identity resolving key really
	 * is only mandatory for devices using resolvable random
	 * addresses.
	 *
	 * Storing all identity resolving keys has the downside that
	 * they will be also loaded on next boot of the system. More
	 * identity resolving keys, means more time during scanning is
	 * needed to actually resolve these addresses.
	 */
	if (bacmp(&irk->rpa, BDADDR_ANY))
		ev.store_hint = 0x01;
	else
		ev.store_hint = 0x00;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
7640 
7641 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
7642 		   bool persistent)
7643 {
7644 	struct mgmt_ev_new_csrk ev;
7645 
7646 	memset(&ev, 0, sizeof(ev));
7647 
7648 	/* Devices using resolvable or non-resolvable random addresses
7649 	 * without providing an indentity resolving key don't require
7650 	 * to store signature resolving keys. Their addresses will change
7651 	 * the next time around.
7652 	 *
7653 	 * Only when a remote device provides an identity address
7654 	 * make sure the signature resolving key is stored. So allow
7655 	 * static random and public addresses here.
7656 	 */
7657 	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
7658 	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
7659 		ev.store_hint = 0x00;
7660 	else
7661 		ev.store_hint = persistent;
7662 
7663 	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
7664 	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
7665 	ev.key.type = csrk->type;
7666 	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
7667 
7668 	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
7669 }
7670 
7671 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
7672 			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
7673 			 u16 max_interval, u16 latency, u16 timeout)
7674 {
7675 	struct mgmt_ev_new_conn_param ev;
7676 
7677 	if (!hci_is_identity_address(bdaddr, bdaddr_type))
7678 		return;
7679 
7680 	memset(&ev, 0, sizeof(ev));
7681 	bacpy(&ev.addr.bdaddr, bdaddr);
7682 	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
7683 	ev.store_hint = store_hint;
7684 	ev.min_interval = cpu_to_le16(min_interval);
7685 	ev.max_interval = cpu_to_le16(max_interval);
7686 	ev.latency = cpu_to_le16(latency);
7687 	ev.timeout = cpu_to_le16(timeout);
7688 
7689 	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
7690 }
7691 
/* Send a Device Connected event to userspace, appending any EIR or
 * advertising data known about the remote device.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	/* Fixed event header plus appended EIR fields are built here */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		/* For BR/EDR, append remote name and class of device
		 * when they are known.
		 */
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}
7728 
7729 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
7730 {
7731 	struct sock **sk = data;
7732 
7733 	cmd->cmd_complete(cmd, 0);
7734 
7735 	*sk = cmd->sk;
7736 	sock_hold(*sk);
7737 
7738 	mgmt_pending_remove(cmd);
7739 }
7740 
7741 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
7742 {
7743 	struct hci_dev *hdev = data;
7744 	struct mgmt_cp_unpair_device *cp = cmd->param;
7745 
7746 	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
7747 
7748 	cmd->cmd_complete(cmd, 0);
7749 	mgmt_pending_remove(cmd);
7750 }
7751 
7752 bool mgmt_powering_down(struct hci_dev *hdev)
7753 {
7754 	struct mgmt_pending_cmd *cmd;
7755 	struct mgmt_mode *cp;
7756 
7757 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7758 	if (!cmd)
7759 		return false;
7760 
7761 	cp = cmd->param;
7762 	if (!cp->val)
7763 		return true;
7764 
7765 	return false;
7766 }
7767 
/* Handle a device disconnection: potentially let a pending power down
 * proceed, complete pending Disconnect/Unpair commands and broadcast
 * a Device Disconnected event.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only emit an event for connections userspace was told about */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Collect the socket of any pending Disconnect command (with a
	 * reference held by disconnect_rsp()) to pass to mgmt_event()
	 * below.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
7803 
7804 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
7805 			    u8 link_type, u8 addr_type, u8 status)
7806 {
7807 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
7808 	struct mgmt_cp_disconnect *cp;
7809 	struct mgmt_pending_cmd *cmd;
7810 
7811 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
7812 			     hdev);
7813 
7814 	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
7815 	if (!cmd)
7816 		return;
7817 
7818 	cp = cmd->param;
7819 
7820 	if (bacmp(bdaddr, &cp->addr.bdaddr))
7821 		return;
7822 
7823 	if (cp->addr.type != bdaddr_type)
7824 		return;
7825 
7826 	cmd->cmd_complete(cmd, mgmt_status(status));
7827 	mgmt_pending_remove(cmd);
7828 }
7829 
7830 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7831 			 u8 addr_type, u8 status)
7832 {
7833 	struct mgmt_ev_connect_failed ev;
7834 
7835 	/* The connection is still in hci_conn_hash so test for 1
7836 	 * instead of 0 to know if this is the last one.
7837 	 */
7838 	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
7839 		cancel_delayed_work(&hdev->power_off);
7840 		queue_work(hdev->req_workqueue, &hdev->power_off.work);
7841 	}
7842 
7843 	bacpy(&ev.addr.bdaddr, bdaddr);
7844 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7845 	ev.status = mgmt_status(status);
7846 
7847 	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
7848 }
7849 
7850 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
7851 {
7852 	struct mgmt_ev_pin_code_request ev;
7853 
7854 	bacpy(&ev.addr.bdaddr, bdaddr);
7855 	ev.addr.type = BDADDR_BREDR;
7856 	ev.secure = secure;
7857 
7858 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
7859 }
7860 
7861 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7862 				  u8 status)
7863 {
7864 	struct mgmt_pending_cmd *cmd;
7865 
7866 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
7867 	if (!cmd)
7868 		return;
7869 
7870 	cmd->cmd_complete(cmd, mgmt_status(status));
7871 	mgmt_pending_remove(cmd);
7872 }
7873 
7874 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7875 				      u8 status)
7876 {
7877 	struct mgmt_pending_cmd *cmd;
7878 
7879 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
7880 	if (!cmd)
7881 		return;
7882 
7883 	cmd->cmd_complete(cmd, mgmt_status(status));
7884 	mgmt_pending_remove(cmd);
7885 }
7886 
7887 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7888 			      u8 link_type, u8 addr_type, u32 value,
7889 			      u8 confirm_hint)
7890 {
7891 	struct mgmt_ev_user_confirm_request ev;
7892 
7893 	BT_DBG("%s", hdev->name);
7894 
7895 	bacpy(&ev.addr.bdaddr, bdaddr);
7896 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7897 	ev.confirm_hint = confirm_hint;
7898 	ev.value = cpu_to_le32(value);
7899 
7900 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
7901 			  NULL);
7902 }
7903 
7904 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7905 			      u8 link_type, u8 addr_type)
7906 {
7907 	struct mgmt_ev_user_passkey_request ev;
7908 
7909 	BT_DBG("%s", hdev->name);
7910 
7911 	bacpy(&ev.addr.bdaddr, bdaddr);
7912 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7913 
7914 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
7915 			  NULL);
7916 }
7917 
7918 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7919 				      u8 link_type, u8 addr_type, u8 status,
7920 				      u8 opcode)
7921 {
7922 	struct mgmt_pending_cmd *cmd;
7923 
7924 	cmd = pending_find(opcode, hdev);
7925 	if (!cmd)
7926 		return -ENOENT;
7927 
7928 	cmd->cmd_complete(cmd, mgmt_status(status));
7929 	mgmt_pending_remove(cmd);
7930 
7931 	return 0;
7932 }
7933 
/* Completion handler for a User Confirm Reply command */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
7940 
/* Completion handler for a User Confirm Negative Reply command */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
7948 
/* Completion handler for a User Passkey Reply command */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
7955 
/* Completion handler for a User Passkey Negative Reply command */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
7963 
7964 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
7965 			     u8 link_type, u8 addr_type, u32 passkey,
7966 			     u8 entered)
7967 {
7968 	struct mgmt_ev_passkey_notify ev;
7969 
7970 	BT_DBG("%s", hdev->name);
7971 
7972 	bacpy(&ev.addr.bdaddr, bdaddr);
7973 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7974 	ev.passkey = __cpu_to_le32(passkey);
7975 	ev.entered = entered;
7976 
7977 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
7978 }
7979 
/* Report an authentication failure for a connection, completing any
 * pending pairing command associated with it.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* If a pairing command is pending, pass its socket to
	 * mgmt_event() and complete the command afterwards.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
8000 
/* Handle completion of an HCI authentication enable change triggered
 * by Set Link Security: update the flag, answer pending commands and
 * emit New Settings if anything changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's HCI_AUTH state into the mgmt-level
	 * link security flag, noting whether it actually changed.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
8027 
8028 static void clear_eir(struct hci_request *req)
8029 {
8030 	struct hci_dev *hdev = req->hdev;
8031 	struct hci_cp_write_eir cp;
8032 
8033 	if (!lmp_ext_inq_capable(hdev))
8034 		return;
8035 
8036 	memset(hdev->eir, 0, sizeof(hdev->eir));
8037 
8038 	memset(&cp, 0, sizeof(cp));
8039 
8040 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
8041 }
8042 
/* Handle completion of a Secure Simple Pairing enable/disable change:
 * update the SSP and HS flags, answer pending Set SSP commands and
 * re-program the controller's EIR data accordingly.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* If enabling failed, roll back the flags and tell
		 * userspace about the reverted settings.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		/* Disabling SSP also clears the High Speed flag */
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	/* Update EIR while SSP is enabled, clear it otherwise */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
8095 
8096 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
8097 {
8098 	struct cmd_lookup *match = data;
8099 
8100 	if (match->sk == NULL) {
8101 		match->sk = cmd->sk;
8102 		sock_hold(match->sk);
8103 	}
8104 }
8105 
/* Handle completion of a Class of Device update: wake up whichever
 * command (Set Dev Class, Add/Remove UUID) triggered it and, on
 * success, broadcast the new class value.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Any of these three commands may have caused the update; the
	 * first pending one found provides the socket (see sk_lookup).
	 */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, NULL);

	if (match.sk)
		sock_put(match.sk);
}
8122 
/* Handle completion of a local name update, broadcasting a Local Name
 * Changed event unless the update happened as part of powering on.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No Set Local Name command is pending, so keep the
		 * cached name in sync directly.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   cmd ? cmd->sk : NULL);
}
8149 
8150 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
8151 {
8152 	int i;
8153 
8154 	for (i = 0; i < uuid_count; i++) {
8155 		if (!memcmp(uuid, uuids[i], 16))
8156 			return true;
8157 	}
8158 
8159 	return false;
8160 }
8161 
/* Scan a raw EIR/advertising data blob for 16, 32 or 128 bit service
 * UUID fields and return true if any of them matches an entry in
 * @uuids (entries are compared in 128-bit form).
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero-length field terminates the data */
		if (field_len == 0)
			break;

		/* Stop on a truncated field */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* Expand each 16-bit UUID into the Bluetooth
			 * base UUID before comparing.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* Same expansion for 32-bit UUIDs */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length byte + payload) */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
8216 
/* Schedule a restart of the ongoing LE scan so that devices whose
 * reports are suppressed by duplicate filtering get reported again
 * with fresh RSSI values.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Skip the restart when it would only take effect after the
	 * current scan window has already ended.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
8231 
/* Apply the service discovery filter (RSSI threshold and UUID list)
 * to a found device. Returns false if the result should be dropped.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
8276 
/* Report a device discovered during inquiry or LE scanning to
 * userspace, after applying any active discovery filter.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Append the class of device only when the EIR data does not
	 * already carry one.
	 */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
8345 
8346 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8347 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
8348 {
8349 	struct mgmt_ev_device_found *ev;
8350 	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
8351 	u16 eir_len;
8352 
8353 	ev = (struct mgmt_ev_device_found *) buf;
8354 
8355 	memset(buf, 0, sizeof(buf));
8356 
8357 	bacpy(&ev->addr.bdaddr, bdaddr);
8358 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
8359 	ev->rssi = rssi;
8360 
8361 	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
8362 				  name_len);
8363 
8364 	ev->eir_len = cpu_to_le16(eir_len);
8365 
8366 	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
8367 }
8368 
8369 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
8370 {
8371 	struct mgmt_ev_discovering ev;
8372 
8373 	BT_DBG("%s discovering %u", hdev->name, discovering);
8374 
8375 	memset(&ev, 0, sizeof(ev));
8376 	ev.type = hdev->discovery.type;
8377 	ev.discovering = discovering;
8378 
8379 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
8380 }
8381 
/* Completion callback for the advertising re-enable request; the
 * status is only logged since nothing else needs to happen here.
 */
static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}
8386 
/* Re-enable advertising, but only if it was requested through Set
 * Advertising or an advertising instance is active.
 */
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, adv_enable_complete);
}
8399 
/* Management interface channel registered with the HCI socket layer */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
8406 
/* Register the management control channel with the HCI socket layer */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
8411 
/* Unregister the management control channel */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
8416