xref: /linux/net/bluetooth/mgmt.c (revision b2d0f5d5dc53532e6f07bc546a476a55ebdfe0f3)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 
40 #define MGMT_VERSION	1
41 #define MGMT_REVISION	14
42 
/* Command opcodes a trusted management socket may issue; reported by
 * the Read Commands handler below for trusted sockets.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
};
110 
/* Event opcodes delivered to trusted management sockets; reported by
 * the Read Commands handler below for trusted sockets.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
};
148 
/* Read-only subset of commands an untrusted (unprivileged) management
 * socket may issue.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
};
157 
/* Subset of events delivered to untrusted management sockets */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
};
171 
/* Cache validity period: 2 seconds expressed in jiffies */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* 16 zero bytes; used to recognise an all-zero (blank) key value */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
176 
177 /* HCI to MGMT error code conversion table */
178 static u8 mgmt_status_table[] = {
179 	MGMT_STATUS_SUCCESS,
180 	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
181 	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
182 	MGMT_STATUS_FAILED,		/* Hardware Failure */
183 	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
184 	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
185 	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
186 	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
187 	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
188 	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
189 	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
190 	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
191 	MGMT_STATUS_BUSY,		/* Command Disallowed */
192 	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
193 	MGMT_STATUS_REJECTED,		/* Rejected Security */
194 	MGMT_STATUS_REJECTED,		/* Rejected Personal */
195 	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
196 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
197 	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
198 	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
199 	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
200 	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
201 	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
202 	MGMT_STATUS_BUSY,		/* Repeated Attempts */
203 	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
204 	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
205 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
206 	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
207 	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
208 	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
209 	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
210 	MGMT_STATUS_FAILED,		/* Unspecified Error */
211 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
212 	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
213 	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
214 	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
215 	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
216 	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
217 	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
218 	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
219 	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
220 	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
221 	MGMT_STATUS_FAILED,		/* Transaction Collision */
222 	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
223 	MGMT_STATUS_REJECTED,		/* QoS Rejected */
224 	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
225 	MGMT_STATUS_REJECTED,		/* Insufficient Security */
226 	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
227 	MGMT_STATUS_BUSY,		/* Role Switch Pending */
228 	MGMT_STATUS_FAILED,		/* Slot Violation */
229 	MGMT_STATUS_FAILED,		/* Role Switch Failed */
230 	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
231 	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
232 	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
233 	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
234 	MGMT_STATUS_BUSY,		/* Controller Busy */
235 	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
236 	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
237 	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
238 	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
239 	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
240 };
241 
242 static u8 mgmt_status(u8 hci_status)
243 {
244 	if (hci_status < ARRAY_SIZE(mgmt_status_table))
245 		return mgmt_status_table[hci_status];
246 
247 	return MGMT_STATUS_FAILED;
248 }
249 
/* Broadcast an index-related event on the control channel to all
 * sockets matching @flag; no socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
256 
/* Broadcast an event on the control channel to sockets matching @flag,
 * except @skip_sk (typically the socket that triggered the change).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
263 
/* Broadcast an event on the control channel to all trusted sockets,
 * except @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
270 
271 static u8 le_addr_type(u8 mgmt_addr_type)
272 {
273 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
274 		return ADDR_LE_DEV_PUBLIC;
275 	else
276 		return ADDR_LE_DEV_RANDOM;
277 }
278 
/* Fill a mgmt_rp_read_version structure (passed as an untyped buffer)
 * with this interface's version and little-endian revision.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
286 
/* MGMT_OP_READ_VERSION handler: report the management interface
 * version/revision.  Index-less command; @hdev, @data and @data_len are
 * unused.  Returns 0 on success or a negative errno.
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	BT_DBG("sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
299 
/* MGMT_OP_READ_COMMANDS handler: report the supported command and event
 * opcodes.  Trusted sockets get the full tables; untrusted sockets only
 * the read-only subset.  Returns 0 on success or a negative errno.
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	BT_DBG("sock %p", sk);

	/* Pick the opcode tables matching the socket's trust level */
	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	/* Reply is the fixed header plus one __le16 per opcode */
	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	/* Commands first, then events, each written little-endian into
	 * the (potentially unaligned) variable-length tail.
	 */
	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}
351 
/* MGMT_OP_READ_INDEX_LIST handler: report the indices of all configured
 * primary controllers.  Unconfigured, raw-only, AMP, user-channel and
 * still-initializing controllers are excluded.  Returns 0 on success or
 * a negative errno.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the entry count, used only to size
	 * the reply buffer.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	/* 2 bytes per index (__le16).  GFP_ATOMIC because the device
	 * list read lock is held across the allocation.
	 */
	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in indices, now applying the full filters */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	/* Recompute the reply length from the entries actually written;
	 * the second pass may have skipped devices counted above.
	 */
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
411 
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: like read_index_list() but
 * reporting only primary controllers that still require configuration.
 * Returns 0 on success or a negative errno.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the entry count for sizing only */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	/* 2 bytes per index (__le16); GFP_ATOMIC while holding the
	 * device list read lock.
	 */
	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in indices with the full filter set */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	/* Report only the entries actually written */
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
471 
472 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
473 			       void *data, u16 data_len)
474 {
475 	struct mgmt_rp_read_ext_index_list *rp;
476 	struct hci_dev *d;
477 	size_t rp_len;
478 	u16 count;
479 	int err;
480 
481 	BT_DBG("sock %p", sk);
482 
483 	read_lock(&hci_dev_list_lock);
484 
485 	count = 0;
486 	list_for_each_entry(d, &hci_dev_list, list) {
487 		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
488 			count++;
489 	}
490 
491 	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
492 	rp = kmalloc(rp_len, GFP_ATOMIC);
493 	if (!rp) {
494 		read_unlock(&hci_dev_list_lock);
495 		return -ENOMEM;
496 	}
497 
498 	count = 0;
499 	list_for_each_entry(d, &hci_dev_list, list) {
500 		if (hci_dev_test_flag(d, HCI_SETUP) ||
501 		    hci_dev_test_flag(d, HCI_CONFIG) ||
502 		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
503 			continue;
504 
505 		/* Devices marked as raw-only are neither configured
506 		 * nor unconfigured controllers.
507 		 */
508 		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
509 			continue;
510 
511 		if (d->dev_type == HCI_PRIMARY) {
512 			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
513 				rp->entry[count].type = 0x01;
514 			else
515 				rp->entry[count].type = 0x00;
516 		} else if (d->dev_type == HCI_AMP) {
517 			rp->entry[count].type = 0x02;
518 		} else {
519 			continue;
520 		}
521 
522 		rp->entry[count].bus = d->bus;
523 		rp->entry[count++].index = cpu_to_le16(d->id);
524 		BT_DBG("Added hci%u", d->id);
525 	}
526 
527 	rp->num_controllers = cpu_to_le16(count);
528 	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
529 
530 	read_unlock(&hci_dev_list_lock);
531 
532 	/* If this command is called at least once, then all the
533 	 * default index and unconfigured index events are disabled
534 	 * and from now on only extended index events are used.
535 	 */
536 	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
537 	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
538 	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
539 
540 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
541 				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);
542 
543 	kfree(rp);
544 
545 	return err;
546 }
547 
548 static bool is_configured(struct hci_dev *hdev)
549 {
550 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
551 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
552 		return false;
553 
554 	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
555 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
556 		return false;
557 
558 	return true;
559 }
560 
561 static __le32 get_missing_options(struct hci_dev *hdev)
562 {
563 	u32 options = 0;
564 
565 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
566 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
567 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
568 
569 	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
570 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
571 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
572 
573 	return cpu_to_le32(options);
574 }
575 
576 static int new_options(struct hci_dev *hdev, struct sock *skip)
577 {
578 	__le32 options = get_missing_options(hdev);
579 
580 	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
581 				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
582 }
583 
584 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
585 {
586 	__le32 options = get_missing_options(hdev);
587 
588 	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
589 				 sizeof(options));
590 }
591 
/* MGMT_OP_READ_CONFIG_INFO handler: report manufacturer plus the
 * supported and still-missing configuration options for @hdev.
 * Returns 0 on success or a negative errno.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	/* External configuration is supported when the quirk says so */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Setting a public address needs a driver set_bdaddr callback */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
619 
/* Compute the bitmask of settings this controller can support, derived
 * from its BR/EDR and LE capabilities and configuration hooks.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Always available regardless of controller features */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable needs interlaced scan (1.2 or later) */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	/* Configuration is possible with either an external config
	 * quirk or a driver hook for setting the public address.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	return settings;
}
659 
/* Compute the bitmask of settings currently active on this controller,
 * one bit per corresponding HCI device flag.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	return settings;
}
727 
/* Look up a pending mgmt command on the control channel by opcode;
 * returns NULL when no such command is pending.
 */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
732 
/* Like pending_find() but additionally matches on the command's user
 * data pointer; returns NULL when no such command is pending.
 */
static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
}
739 
740 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
741 {
742 	struct mgmt_pending_cmd *cmd;
743 
744 	/* If there's a pending mgmt command the flags will not yet have
745 	 * their final values, so check for this first.
746 	 */
747 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
748 	if (cmd) {
749 		struct mgmt_mode *cp = cmd->param;
750 		if (cp->val == 0x01)
751 			return LE_AD_GENERAL;
752 		else if (cp->val == 0x02)
753 			return LE_AD_LIMITED;
754 	} else {
755 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
756 			return LE_AD_LIMITED;
757 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
758 			return LE_AD_GENERAL;
759 	}
760 
761 	return 0;
762 }
763 
764 bool mgmt_get_connectable(struct hci_dev *hdev)
765 {
766 	struct mgmt_pending_cmd *cmd;
767 
768 	/* If there's a pending mgmt command the flag will not yet have
769 	 * it's final value, so check for this first.
770 	 */
771 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
772 	if (cmd) {
773 		struct mgmt_mode *cp = cmd->param;
774 
775 		return cp->val;
776 	}
777 
778 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
779 }
780 
/* Delayed work handler: when the service cache period expires, push the
 * (possibly changed) EIR data and class of device to the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Only act if the cache flag was still set; clears it atomically */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	__hci_req_update_eir(&req);
	__hci_req_update_class(&req);

	hci_dev_unlock(hdev);

	/* Submit both updates as a single HCI request */
	hci_req_run(&req, NULL);
}
801 
/* Delayed work handler: mark the resolvable private address as expired
 * and, if advertising is enabled, kick off re-enabling it so a fresh
 * RPA gets generated and programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Nothing else to do unless advertising is active */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	hci_req_init(&req, hdev);
	__hci_req_enable_advertising(&req);
	hci_req_run(&req, NULL);
}
823 
/* One-time per-device mgmt initialization: runs only on the first mgmt
 * access to @hdev (guarded by the HCI_MGMT flag), setting up the
 * delayed-work handlers and the mgmt-specific default flags.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	/* test-and-set makes repeat calls a no-op */
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
839 
/* MGMT_OP_READ_INFO handler: report address, version, manufacturer,
 * settings, class of device and names for @hdev.  Returns 0 on success
 * or a negative errno.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	/* Class of device is a fixed 3-byte field */
	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
869 
/* Append class-of-device, appearance and name EIR structures for @hdev
 * to @eir and return the number of bytes written.
 *
 * NOTE(review): @eir has no explicit length parameter — callers must
 * provide a buffer large enough for the worst case (the visible callers
 * use 512-byte buffers); confirm against the maximum name lengths.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	/* Class of device is only meaningful with BR/EDR enabled */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	/* Appearance is only meaningful with LE enabled */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strlen(hdev->dev_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strlen(hdev->short_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
893 
/* MGMT_OP_READ_EXT_INFO handler: like read_controller_info() but with
 * class/appearance/names packed as EIR data.  As a side effect,
 * switches the calling socket over to extended info events only.
 * Returns 0 on success or a negative errno.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	/* Stack buffer for the fixed header plus variable EIR tail */
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	BT_DBG("sock %p %s", sk, hdev->name);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
933 
/* Broadcast an Extended Controller Information Changed event carrying
 * the current EIR data to all sockets with extended info events
 * enabled, except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	/* Stack buffer for the fixed header plus variable EIR tail */
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
949 
950 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
951 {
952 	__le32 settings = cpu_to_le32(get_current_settings(hdev));
953 
954 	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
955 				 sizeof(settings));
956 }
957 
/* HCI request completion callback for clean_up_hci_state(): once all
 * connections are gone, replace any pending delayed power-off with an
 * immediate one.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
967 
968 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
969 {
970 	struct mgmt_ev_advertising_added ev;
971 
972 	ev.instance = instance;
973 
974 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
975 }
976 
977 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
978 			      u8 instance)
979 {
980 	struct mgmt_ev_advertising_removed ev;
981 
982 	ev.instance = instance;
983 
984 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
985 }
986 
987 static void cancel_adv_timeout(struct hci_dev *hdev)
988 {
989 	if (hdev->adv_instance_timeout) {
990 		hdev->adv_instance_timeout = 0;
991 		cancel_delayed_work(&hdev->adv_instance_expire);
992 	}
993 }
994 
/* Build and run a single HCI request that quiesces the controller for
 * power-off: disable page/inquiry scan, clear advertising instances,
 * stop advertising and discovery, and abort every connection.
 *
 * Returns the hci_req_run() result; -ENODATA means nothing needed to
 * be sent (see the caller's handling in set_powered()).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	/* Turn off page and/or inquiry scan if either is active */
	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* Drop all advertising instances (0x00 = all) without rescheduling */
	hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(&req);

	discov_stopped = hci_req_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		/* 0x15 == Terminated due to Power Off */
		__hci_abort_conn(&req, conn, 0x15);
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1028 
/* MGMT_OP_SET_POWERED handler: power the controller up or down.
 *
 * Powering on is deferred to the power_on work item. Powering off
 * first runs clean_up_hci_state() and then schedules the delayed
 * power_off work; the pending command is answered once the state
 * change completes.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Powered command may be pending per controller */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just echo the settings back */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1083 
1084 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1085 {
1086 	__le32 ev = cpu_to_le32(get_current_settings(hdev));
1087 
1088 	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1089 				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1090 }
1091 
/* Broadcast the current settings to every mgmt socket */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1096 
/* Iteration context for mgmt_pending_foreach() callbacks: collects the
 * socket of the first matched command (so the caller can skip it when
 * broadcasting follow-up events) together with the device and status.
 */
struct cmd_lookup {
	struct sock *sk;		/* first matched command's socket, or NULL */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1102 
/* mgmt_pending_foreach() callback: answer a pending command with the
 * current settings and retire it. The first command's socket is saved
 * in the lookup (with a reference taken) so the caller can later skip
 * it when broadcasting New Settings.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1118 
/* mgmt_pending_foreach() callback: fail a pending command with the
 * status pointed to by @data and remove it from the pending list.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1126 
1127 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1128 {
1129 	if (cmd->cmd_complete) {
1130 		u8 *status = data;
1131 
1132 		cmd->cmd_complete(cmd, *status);
1133 		mgmt_pending_remove(cmd);
1134 
1135 		return;
1136 	}
1137 
1138 	cmd_status_rsp(cmd, data);
1139 }
1140 
/* cmd_complete handler that echoes back the command's full original
 * parameters as the response payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1146 
/* cmd_complete handler that returns only the leading mgmt_addr_info
 * portion of the command's parameters.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1152 
1153 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1154 {
1155 	if (!lmp_bredr_capable(hdev))
1156 		return MGMT_STATUS_NOT_SUPPORTED;
1157 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1158 		return MGMT_STATUS_REJECTED;
1159 	else
1160 		return MGMT_STATUS_SUCCESS;
1161 }
1162 
1163 static u8 mgmt_le_support(struct hci_dev *hdev)
1164 {
1165 	if (!lmp_le_capable(hdev))
1166 		return MGMT_STATUS_NOT_SUPPORTED;
1167 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1168 		return MGMT_STATUS_REJECTED;
1169 	else
1170 		return MGMT_STATUS_SUCCESS;
1171 }
1172 
/* Completion handler for the discoverable update: answer the pending
 * SET_DISCOVERABLE command, arm the discoverable timeout when one was
 * requested, and broadcast the new settings.
 */
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the limited flag set optimistically by the
		 * command handler before the request failed.
		 */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1207 
/* MGMT_OP_SET_DISCOVERABLE handler: 0x00 disables, 0x01 enables
 * general and 0x02 limited discoverable mode, with an optional
 * timeout (required for limited, forbidden when disabling).
 *
 * While powered off only the setting flag is toggled. Otherwise the
 * flags are updated and the actual HCI work is deferred to the
 * discoverable_update work item, which ends up in
 * mgmt_set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled first */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	queue_work(hdev->req_workqueue, &hdev->discoverable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1331 
/* Completion handler for the connectable update: answer the pending
 * SET_CONNECTABLE command and, on success, broadcast new settings.
 */
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1359 
/* Apply a connectable change that needs no HCI commands (used while
 * powered off): update the flags, respond to @sk, refresh scanning
 * state and emit New Settings if anything changed. Clearing
 * connectable also clears discoverable.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_req_update_scan(hdev);
		hci_update_background_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1388 
/* MGMT_OP_SET_CONNECTABLE handler. While powered off only the
 * settings are updated; otherwise the flags are adjusted and the
 * connectable_update work item performs the HCI changes, finishing
 * in mgmt_set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Dropping connectable also ends discoverable mode, so
		 * stop its timeout and clear both discoverable flags.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	queue_work(hdev->req_workqueue, &hdev->connectable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1445 
/* MGMT_OP_SET_BONDABLE handler: toggles the HCI_BONDABLE flag. No HCI
 * traffic is needed, except that in limited privacy mode a change may
 * affect the local advertising address and therefore schedules the
 * discoverable_update work.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		if (hdev_is_powered(hdev) &&
		    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
		    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
		    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1488 
/* MGMT_OP_SET_LINK_SECURITY handler: translates the requested mode
 * into an HCI_OP_WRITE_AUTH_ENABLE command. While powered off only
 * the HCI_LINK_SECURITY flag is recorded; the pending command is
 * completed when the HCI command finishes.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: no HCI needed */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1557 
/* MGMT_OP_SET_SSP handler: enables/disables Secure Simple Pairing via
 * HCI_OP_WRITE_SSP_MODE. Disabling SSP also clears High Speed and, if
 * debug keys were in use, turns off SSP debug mode first.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* HS depends on SSP, so it must be cleared too */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Disabling SSP while debug keys are in use: turn debug mode
	 * off as well (cp->val is 0x00 here).
	 */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1638 
1639 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1640 {
1641 	struct mgmt_mode *cp = data;
1642 	bool changed;
1643 	u8 status;
1644 	int err;
1645 
1646 	BT_DBG("request for %s", hdev->name);
1647 
1648 	status = mgmt_bredr_support(hdev);
1649 	if (status)
1650 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1651 
1652 	if (!lmp_ssp_capable(hdev))
1653 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1654 				       MGMT_STATUS_NOT_SUPPORTED);
1655 
1656 	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1657 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1658 				       MGMT_STATUS_REJECTED);
1659 
1660 	if (cp->val != 0x00 && cp->val != 0x01)
1661 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1662 				       MGMT_STATUS_INVALID_PARAMS);
1663 
1664 	hci_dev_lock(hdev);
1665 
1666 	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1667 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1668 				      MGMT_STATUS_BUSY);
1669 		goto unlock;
1670 	}
1671 
1672 	if (cp->val) {
1673 		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
1674 	} else {
1675 		if (hdev_is_powered(hdev)) {
1676 			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1677 					      MGMT_STATUS_REJECTED);
1678 			goto unlock;
1679 		}
1680 
1681 		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
1682 	}
1683 
1684 	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1685 	if (err < 0)
1686 		goto unlock;
1687 
1688 	if (changed)
1689 		err = new_settings(hdev, sk);
1690 
1691 unlock:
1692 	hci_dev_unlock(hdev);
1693 	return err;
1694 }
1695 
/* HCI request callback for set_le(): answer all pending SET_LE
 * commands, broadcast new settings and, while LE remains enabled,
 * refresh the default advertising data and the background scan.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp() took a reference on the first matched socket */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;

		hci_req_init(&req, hdev);
		__hci_req_update_adv_data(&req, 0x00);
		__hci_req_update_scan_rsp_data(&req, 0x00);
		hci_req_run(&req, NULL);
		hci_update_background_scan(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}
1735 
/* MGMT_OP_SET_LE handler: toggles LE host support through
 * HCI_OP_WRITE_LE_HOST_SUPPORTED. LE-only configurations cannot have
 * LE switched off; disabling also removes advertising instances and
 * stops any active advertising. Completion is handled by
 * le_enable_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	/* No HCI work needed while powered off or when the controller
	 * already matches the requested host LE state.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			__hci_req_disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1839 
1840 /* This is a helper function to test for pending mgmt commands that can
1841  * cause CoD or EIR HCI commands. We can only allow one such pending
1842  * mgmt command at a time since otherwise we cannot easily track what
1843  * the current values are, will be, and based on that calculate if a new
1844  * HCI command needs to be sent and if yes with what value.
1845  */
1846 static bool pending_eir_or_class(struct hci_dev *hdev)
1847 {
1848 	struct mgmt_pending_cmd *cmd;
1849 
1850 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1851 		switch (cmd->opcode) {
1852 		case MGMT_OP_ADD_UUID:
1853 		case MGMT_OP_REMOVE_UUID:
1854 		case MGMT_OP_SET_DEV_CLASS:
1855 		case MGMT_OP_SET_POWERED:
1856 			return true;
1857 		}
1858 	}
1859 
1860 	return false;
1861 }
1862 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16/32-bit UUID values occupy the last
 * four bytes.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
1867 
1868 static u8 get_uuid_size(const u8 *uuid)
1869 {
1870 	u32 val;
1871 
1872 	if (memcmp(uuid, bluetooth_base_uuid, 12))
1873 		return 128;
1874 
1875 	val = get_unaligned_le32(&uuid[12]);
1876 	if (val > 0xffff)
1877 		return 32;
1878 
1879 	return 16;
1880 }
1881 
/* Finish the pending CoD/EIR-related command @mgmt_op by returning
 * the current device class together with the translated HCI status.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1900 
/* HCI request callback for add_uuid() */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
1907 
/* MGMT_OP_ADD_UUID handler: store the UUID on the device's list and
 * refresh the class of device and EIR data. If no HCI commands are
 * needed (-ENODATA) the command completes immediately; otherwise a
 * pending command waits for add_uuid_complete().
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* No HCI work was needed: complete right away */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1965 
1966 static bool enable_service_cache(struct hci_dev *hdev)
1967 {
1968 	if (!hdev_is_powered(hdev))
1969 		return false;
1970 
1971 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
1972 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
1973 				   CACHE_TIMEOUT);
1974 		return true;
1975 	}
1976 
1977 	return false;
1978 }
1979 
/* HCI request callback for remove_uuid() */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
1986 
/* MGMT_OP_REMOVE_UUID handler: remove one UUID (or all of them when
 * the all-zero wildcard UUID is given) and refresh the class of
 * device and EIR data. Like add_uuid(), completes immediately when no
 * HCI commands are needed, otherwise via remove_uuid_complete().
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* When the service cache timer is armed, the class/EIR
		 * update is deferred to its expiry.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI work was needed: complete right away */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2065 
/* HCI request callback for set_dev_class() */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2072 
/* MGMT_OP_SET_DEV_CLASS handler: update the major/minor class of
 * device. While powered off only the values are stored; otherwise the
 * class (and EIR, if the service cache was active) is written to the
 * controller and completed via set_class_complete().
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Lowest two minor bits and highest three major bits are reserved */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* NOTE(review): the lock is dropped around the synchronous
		 * cancel, presumably because the service_cache work takes
		 * the hdev lock itself — confirm against its handler.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		__hci_req_update_eir(&req);
	}

	__hci_req_update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI work was needed: complete right away */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2143 
/* MGMT_OP_LOAD_LINK_KEYS handler: atomically replace the stored set of
 * BR/EDR link keys with the list supplied by userspace.
 *
 * The payload is a struct mgmt_cp_load_link_keys header followed by
 * key_count variable-length mgmt_link_key_info entries; the whole
 * payload is validated before any existing key is touched.
 *
 * Returns 0 on success or a negative error; protocol-level failures
 * are reported to the socket via mgmt_cmd_status().
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Largest key_count for which the expected_len computation below
	 * still fits in a u16 without overflowing.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	/* Link keys are a BR/EDR-only concept */
	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload must be exactly the header plus key_count entries */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every entry before touching the key store so that a
	 * bad entry cannot leave the store half-replaced.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	/* Track whether the "keep debug keys" setting actually changed so
	 * that a New Settings event is only emitted when needed.
	 */
	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2225 
2226 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2227 			   u8 addr_type, struct sock *skip_sk)
2228 {
2229 	struct mgmt_ev_device_unpaired ev;
2230 
2231 	bacpy(&ev.addr.bdaddr, bdaddr);
2232 	ev.addr.type = addr_type;
2233 
2234 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2235 			  skip_sk);
2236 }
2237 
/* MGMT_OP_UNPAIR_DEVICE handler: remove all stored keys for a device
 * and optionally terminate an existing connection to it.
 *
 * For BR/EDR addresses only the link key is removed; for LE addresses
 * the IRK and LTKs are removed and any auto-connect parameters are
 * disabled.  When a disconnect is requested and a link exists, the
 * command stays pending until the link teardown completes.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	/* The reply always echoes back the address being unpaired */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		/* A missing link key means the device was never paired */
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

	/* A missing LTK means the device was never paired */
	err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}

	/* Abort any ongoing SMP pairing */
	/* NOTE(review): the keys were just removed above while the SMP
	 * context may still reference them - confirm smp_cancel_pairing()
	 * copes with pairing state that outlives the key removal.
	 */
	smp_cancel_pairing(conn);

	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Completion is deferred until the disconnect has finished */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2368 
/* MGMT_OP_DISCONNECT handler: terminate the ACL or LE link to a
 * device.  The command stays pending until the disconnect completes;
 * only one MGMT_OP_DISCONNECT may be outstanding per controller.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The reply always echoes back the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be in flight at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	/* Look the connection up on the transport matching the address
	 * type.
	 */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* Connections in BT_OPEN/BT_CLOSED have no link to tear down */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2434 
2435 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2436 {
2437 	switch (link_type) {
2438 	case LE_LINK:
2439 		switch (addr_type) {
2440 		case ADDR_LE_DEV_PUBLIC:
2441 			return BDADDR_LE_PUBLIC;
2442 
2443 		default:
2444 			/* Fallback to LE Random address type */
2445 			return BDADDR_LE_RANDOM;
2446 		}
2447 
2448 	default:
2449 		/* Fallback to BR/EDR type */
2450 		return BDADDR_BREDR;
2451 	}
2452 }
2453 
/* MGMT_OP_GET_CONNECTIONS handler: return the addresses of all
 * connections currently visible to the management interface.  SCO and
 * eSCO links are filtered out of the reply.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	size_t rp_len;
	int err;
	u16 i;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count mgmt-visible connections to size the reply */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
	rp = kmalloc(rp_len, GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in the entries.  SCO/eSCO links are skipped
	 * without advancing the index, so their slot is simply reused by
	 * the next accepted connection.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				rp_len);

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2511 
2512 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2513 				   struct mgmt_cp_pin_code_neg_reply *cp)
2514 {
2515 	struct mgmt_pending_cmd *cmd;
2516 	int err;
2517 
2518 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2519 			       sizeof(*cp));
2520 	if (!cmd)
2521 		return -ENOMEM;
2522 
2523 	cmd->cmd_complete = addr_cmd_complete;
2524 
2525 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2526 			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2527 	if (err < 0)
2528 		mgmt_pending_remove(cmd);
2529 
2530 	return err;
2531 }
2532 
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN code to
 * the controller for an ongoing BR/EDR pairing.
 *
 * When high security is pending, only a full 16-byte PIN is
 * acceptable; a shorter one is turned into a negative reply and the
 * command fails with Invalid Parameters.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* A PIN reply only makes sense for an existing ACL link */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		/* Reject the pairing on the HCI level, then report the
		 * too-short PIN back to the caller.
		 */
		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2594 
2595 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2596 			     u16 len)
2597 {
2598 	struct mgmt_cp_set_io_capability *cp = data;
2599 
2600 	BT_DBG("");
2601 
2602 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2603 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2604 				       MGMT_STATUS_INVALID_PARAMS);
2605 
2606 	hci_dev_lock(hdev);
2607 
2608 	hdev->io_capability = cp->io_capability;
2609 
2610 	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2611 	       hdev->io_capability);
2612 
2613 	hci_dev_unlock(hdev);
2614 
2615 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
2616 				 NULL, 0);
2617 }
2618 
2619 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2620 {
2621 	struct hci_dev *hdev = conn->hdev;
2622 	struct mgmt_pending_cmd *cmd;
2623 
2624 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2625 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2626 			continue;
2627 
2628 		if (cmd->user_data != conn)
2629 			continue;
2630 
2631 		return cmd;
2632 	}
2633 
2634 	return NULL;
2635 }
2636 
/* Complete a pending PAIR_DEVICE command with the given mgmt status and
 * release the connection references taken when the pairing started.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Releases the reference taken via hci_conn_get() when the
	 * pairing command was queued in pair_device().
	 */
	hci_conn_put(conn);

	return err;
}
2665 
2666 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2667 {
2668 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2669 	struct mgmt_pending_cmd *cmd;
2670 
2671 	cmd = find_pairing(conn);
2672 	if (cmd) {
2673 		cmd->cmd_complete(cmd, status);
2674 		mgmt_pending_remove(cmd);
2675 	}
2676 }
2677 
2678 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2679 {
2680 	struct mgmt_pending_cmd *cmd;
2681 
2682 	BT_DBG("status %u", status);
2683 
2684 	cmd = find_pairing(conn);
2685 	if (!cmd) {
2686 		BT_DBG("Unable to find a pending command");
2687 		return;
2688 	}
2689 
2690 	cmd->cmd_complete(cmd, mgmt_status(status));
2691 	mgmt_pending_remove(cmd);
2692 }
2693 
2694 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2695 {
2696 	struct mgmt_pending_cmd *cmd;
2697 
2698 	BT_DBG("status %u", status);
2699 
2700 	if (!status)
2701 		return;
2702 
2703 	cmd = find_pairing(conn);
2704 	if (!cmd) {
2705 		BT_DBG("Unable to find a pending command");
2706 		return;
2707 	}
2708 
2709 	cmd->cmd_complete(cmd, mgmt_status(status));
2710 	mgmt_pending_remove(cmd);
2711 }
2712 
2713 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2714 		       u16 len)
2715 {
2716 	struct mgmt_cp_pair_device *cp = data;
2717 	struct mgmt_rp_pair_device rp;
2718 	struct mgmt_pending_cmd *cmd;
2719 	u8 sec_level, auth_type;
2720 	struct hci_conn *conn;
2721 	int err;
2722 
2723 	BT_DBG("");
2724 
2725 	memset(&rp, 0, sizeof(rp));
2726 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2727 	rp.addr.type = cp->addr.type;
2728 
2729 	if (!bdaddr_type_is_valid(cp->addr.type))
2730 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2731 					 MGMT_STATUS_INVALID_PARAMS,
2732 					 &rp, sizeof(rp));
2733 
2734 	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2735 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2736 					 MGMT_STATUS_INVALID_PARAMS,
2737 					 &rp, sizeof(rp));
2738 
2739 	hci_dev_lock(hdev);
2740 
2741 	if (!hdev_is_powered(hdev)) {
2742 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2743 					MGMT_STATUS_NOT_POWERED, &rp,
2744 					sizeof(rp));
2745 		goto unlock;
2746 	}
2747 
2748 	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2749 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2750 					MGMT_STATUS_ALREADY_PAIRED, &rp,
2751 					sizeof(rp));
2752 		goto unlock;
2753 	}
2754 
2755 	sec_level = BT_SECURITY_MEDIUM;
2756 	auth_type = HCI_AT_DEDICATED_BONDING;
2757 
2758 	if (cp->addr.type == BDADDR_BREDR) {
2759 		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2760 				       auth_type);
2761 	} else {
2762 		u8 addr_type = le_addr_type(cp->addr.type);
2763 		struct hci_conn_params *p;
2764 
2765 		/* When pairing a new device, it is expected to remember
2766 		 * this device for future connections. Adding the connection
2767 		 * parameter information ahead of time allows tracking
2768 		 * of the slave preferred values and will speed up any
2769 		 * further connection establishment.
2770 		 *
2771 		 * If connection parameters already exist, then they
2772 		 * will be kept and this function does nothing.
2773 		 */
2774 		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
2775 
2776 		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2777 			p->auto_connect = HCI_AUTO_CONN_DISABLED;
2778 
2779 		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
2780 					   addr_type, sec_level,
2781 					   HCI_LE_CONN_TIMEOUT);
2782 	}
2783 
2784 	if (IS_ERR(conn)) {
2785 		int status;
2786 
2787 		if (PTR_ERR(conn) == -EBUSY)
2788 			status = MGMT_STATUS_BUSY;
2789 		else if (PTR_ERR(conn) == -EOPNOTSUPP)
2790 			status = MGMT_STATUS_NOT_SUPPORTED;
2791 		else if (PTR_ERR(conn) == -ECONNREFUSED)
2792 			status = MGMT_STATUS_REJECTED;
2793 		else
2794 			status = MGMT_STATUS_CONNECT_FAILED;
2795 
2796 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2797 					status, &rp, sizeof(rp));
2798 		goto unlock;
2799 	}
2800 
2801 	if (conn->connect_cfm_cb) {
2802 		hci_conn_drop(conn);
2803 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2804 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
2805 		goto unlock;
2806 	}
2807 
2808 	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2809 	if (!cmd) {
2810 		err = -ENOMEM;
2811 		hci_conn_drop(conn);
2812 		goto unlock;
2813 	}
2814 
2815 	cmd->cmd_complete = pairing_complete;
2816 
2817 	/* For LE, just connecting isn't a proof that the pairing finished */
2818 	if (cp->addr.type == BDADDR_BREDR) {
2819 		conn->connect_cfm_cb = pairing_complete_cb;
2820 		conn->security_cfm_cb = pairing_complete_cb;
2821 		conn->disconn_cfm_cb = pairing_complete_cb;
2822 	} else {
2823 		conn->connect_cfm_cb = le_pairing_complete_cb;
2824 		conn->security_cfm_cb = le_pairing_complete_cb;
2825 		conn->disconn_cfm_cb = le_pairing_complete_cb;
2826 	}
2827 
2828 	conn->io_capability = cp->io_cap;
2829 	cmd->user_data = hci_conn_get(conn);
2830 
2831 	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
2832 	    hci_conn_security(conn, sec_level, auth_type, true)) {
2833 		cmd->cmd_complete(cmd, 0);
2834 		mgmt_pending_remove(cmd);
2835 	}
2836 
2837 	err = 0;
2838 
2839 unlock:
2840 	hci_dev_unlock(hdev);
2841 	return err;
2842 }
2843 
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort the pairing started by a
 * pending MGMT_OP_PAIR_DEVICE command for the given address.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* Without a pending PAIR_DEVICE there is nothing to cancel */
	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* NOTE(review): only the bdaddr is compared here, not the
	 * address type - confirm that is sufficient to identify the
	 * pairing being cancelled.
	 */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Finish the pending pairing command with Cancelled status */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
2886 
/* Common handler for all user pairing responses (PIN code, user
 * confirmation and passkey replies, both positive and negative).
 *
 * @mgmt_op:  management opcode to answer with
 * @hci_op:   HCI opcode used to forward the response to the controller
 *            (BR/EDR only)
 * @passkey:  forwarded only when @hci_op is HCI_OP_USER_PASSKEY_REPLY
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	/* Look the connection up on the matching transport */
	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses go through SMP, not HCI, and complete
	 * immediately.
	 */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
2957 
2958 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2959 			      void *data, u16 len)
2960 {
2961 	struct mgmt_cp_pin_code_neg_reply *cp = data;
2962 
2963 	BT_DBG("");
2964 
2965 	return user_pairing_resp(sk, hdev, &cp->addr,
2966 				MGMT_OP_PIN_CODE_NEG_REPLY,
2967 				HCI_OP_PIN_CODE_NEG_REPLY, 0);
2968 }
2969 
2970 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2971 			      u16 len)
2972 {
2973 	struct mgmt_cp_user_confirm_reply *cp = data;
2974 
2975 	BT_DBG("");
2976 
2977 	if (len != sizeof(*cp))
2978 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2979 				       MGMT_STATUS_INVALID_PARAMS);
2980 
2981 	return user_pairing_resp(sk, hdev, &cp->addr,
2982 				 MGMT_OP_USER_CONFIRM_REPLY,
2983 				 HCI_OP_USER_CONFIRM_REPLY, 0);
2984 }
2985 
2986 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2987 				  void *data, u16 len)
2988 {
2989 	struct mgmt_cp_user_confirm_neg_reply *cp = data;
2990 
2991 	BT_DBG("");
2992 
2993 	return user_pairing_resp(sk, hdev, &cp->addr,
2994 				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2995 				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
2996 }
2997 
2998 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2999 			      u16 len)
3000 {
3001 	struct mgmt_cp_user_passkey_reply *cp = data;
3002 
3003 	BT_DBG("");
3004 
3005 	return user_pairing_resp(sk, hdev, &cp->addr,
3006 				 MGMT_OP_USER_PASSKEY_REPLY,
3007 				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3008 }
3009 
3010 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3011 				  void *data, u16 len)
3012 {
3013 	struct mgmt_cp_user_passkey_neg_reply *cp = data;
3014 
3015 	BT_DBG("");
3016 
3017 	return user_pairing_resp(sk, hdev, &cp->addr,
3018 				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3019 				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3020 }
3021 
/* If the current advertising instance uses any of the data selected by
 * @flags (e.g. MGMT_ADV_FLAG_LOCAL_NAME or MGMT_ADV_FLAG_APPEARANCE)
 * that has just changed, cancel its timeout and schedule the next
 * instance so the updated data gets advertised.
 */
static void adv_expire(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;
	struct hci_request req;
	int err;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return;

	cancel_adv_timeout(hdev);

	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return;

	hci_req_init(&req, hdev);
	err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
					      true);
	if (err)
		return;

	hci_req_run(&req, NULL);
}
3050 
/* Request-complete callback for the HCI transaction issued by
 * set_local_name(); reports the result back on the pending mgmt
 * command.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			        mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		/* The advertised local name may have changed */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3082 
/* MGMT_OP_SET_LOCAL_NAME handler: update the controller's complete and
 * short local name.  If the controller is powered, the change is
 * propagated via an HCI request and the command completes from
 * set_name_complete(); otherwise only the stored values are updated.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* Unpowered: just store the names and notify listeners */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		__hci_req_update_name(&req);
		__hci_req_update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		__hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3152 
3153 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3154 			  u16 len)
3155 {
3156 	struct mgmt_cp_set_appearance *cp = data;
3157 	u16 apperance;
3158 	int err;
3159 
3160 	BT_DBG("");
3161 
3162 	if (!lmp_le_capable(hdev))
3163 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3164 				       MGMT_STATUS_NOT_SUPPORTED);
3165 
3166 	apperance = le16_to_cpu(cp->appearance);
3167 
3168 	hci_dev_lock(hdev);
3169 
3170 	if (hdev->appearance != apperance) {
3171 		hdev->appearance = apperance;
3172 
3173 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3174 			adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3175 
3176 		ext_info_changed(hdev, sk);
3177 	}
3178 
3179 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3180 				0);
3181 
3182 	hci_dev_unlock(hdev);
3183 
3184 	return err;
3185 }
3186 
/* Request-complete callback for read_local_oob_data(): translate the
 * HCI response (legacy P-192-only or extended P-192 + P-256 variant,
 * selected by @opcode) into the management reply.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
				         u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy variant: only P-192 hash and randomizer */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Trim the unused P-256 fields from the reply */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended variant: both P-192 and P-256 values */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}
3245 
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: ask the controller for its
 * local out-of-band pairing data.  The extended (Secure Connections)
 * HCI command is used when BR/EDR SC is enabled; the result is
 * delivered via read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB data requires Secure Simple Pairing support */
	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one read may be in flight at a time */
	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
	else
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_data_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3296 
/* Handler for MGMT_OP_ADD_REMOTE_OOB_DATA.
 *
 * Stores remote Out-of-Band pairing data for a peer device.  Two
 * payload sizes are accepted: the legacy form carrying only the P-192
 * hash/randomizer, and the extended form carrying both P-192 and
 * P-256 values.  Any other length is rejected as invalid parameters.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	BT_DBG("%s ", hdev->name);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy payload: P-192 hash and randomizer only. */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* The legacy form is only accepted for BR/EDR addresses. */
		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended payload: both P-192 and P-256 values. */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3404 
3405 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3406 				  void *data, u16 len)
3407 {
3408 	struct mgmt_cp_remove_remote_oob_data *cp = data;
3409 	u8 status;
3410 	int err;
3411 
3412 	BT_DBG("%s", hdev->name);
3413 
3414 	if (cp->addr.type != BDADDR_BREDR)
3415 		return mgmt_cmd_complete(sk, hdev->id,
3416 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3417 					 MGMT_STATUS_INVALID_PARAMS,
3418 					 &cp->addr, sizeof(cp->addr));
3419 
3420 	hci_dev_lock(hdev);
3421 
3422 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3423 		hci_remote_oob_data_clear(hdev);
3424 		status = MGMT_STATUS_SUCCESS;
3425 		goto done;
3426 	}
3427 
3428 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3429 	if (err < 0)
3430 		status = MGMT_STATUS_INVALID_PARAMS;
3431 	else
3432 		status = MGMT_STATUS_SUCCESS;
3433 
3434 done:
3435 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3436 				status, &cp->addr, sizeof(cp->addr));
3437 
3438 	hci_dev_unlock(hdev);
3439 	return err;
3440 }
3441 
3442 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
3443 {
3444 	struct mgmt_pending_cmd *cmd;
3445 
3446 	BT_DBG("status %d", status);
3447 
3448 	hci_dev_lock(hdev);
3449 
3450 	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
3451 	if (!cmd)
3452 		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
3453 
3454 	if (!cmd)
3455 		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
3456 
3457 	if (cmd) {
3458 		cmd->cmd_complete(cmd, mgmt_status(status));
3459 		mgmt_pending_remove(cmd);
3460 	}
3461 
3462 	hci_dev_unlock(hdev);
3463 }
3464 
3465 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
3466 				    uint8_t *mgmt_status)
3467 {
3468 	switch (type) {
3469 	case DISCOV_TYPE_LE:
3470 		*mgmt_status = mgmt_le_support(hdev);
3471 		if (*mgmt_status)
3472 			return false;
3473 		break;
3474 	case DISCOV_TYPE_INTERLEAVED:
3475 		*mgmt_status = mgmt_le_support(hdev);
3476 		if (*mgmt_status)
3477 			return false;
3478 		/* Intentional fall-through */
3479 	case DISCOV_TYPE_BREDR:
3480 		*mgmt_status = mgmt_bredr_support(hdev);
3481 		if (*mgmt_status)
3482 			return false;
3483 		break;
3484 	default:
3485 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
3486 		return false;
3487 	}
3488 
3489 	return true;
3490 }
3491 
/* Shared implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY.
 *
 * Validates power state, discovery state and discovery type, then
 * schedules the actual discovery via the discov_update work item.
 * Completion is reported asynchronously from
 * mgmt_start_discovery_complete().
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject if discovery is already running or the controller is
	 * busy with a periodic inquiry.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* Hand off to the request workqueue; the state machine moves
	 * out of STARTING once the HCI commands complete.
	 */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
3552 
/* Handler for MGMT_OP_START_DISCOVERY; thin wrapper over the shared
 * discovery start path.
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
3559 
/* Handler for MGMT_OP_START_LIMITED_DISCOVERY; thin wrapper over the
 * shared discovery start path.
 */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
3567 
/* Completion callback for Start Service Discovery: reply with only
 * the first byte of the stored parameters (the discovery type).
 */
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
					  u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, 1);
}
3574 
/* Handler for MGMT_OP_START_SERVICE_DISCOVERY.
 *
 * Like start_discovery_internal() but additionally installs a result
 * filter (RSSI threshold plus an optional UUID list) before scheduling
 * the discovery work.  The variable-length command payload must be
 * exactly sizeof(*cp) plus 16 bytes per UUID.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* Bound uuid_count first so the expected_len computation below
	 * cannot overflow u16 arithmetic.
	 */
	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		/* Copy the UUID list; freed later via
		 * hci_discovery_filter_clear().
		 */
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
3675 
3676 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
3677 {
3678 	struct mgmt_pending_cmd *cmd;
3679 
3680 	BT_DBG("status %d", status);
3681 
3682 	hci_dev_lock(hdev);
3683 
3684 	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3685 	if (cmd) {
3686 		cmd->cmd_complete(cmd, mgmt_status(status));
3687 		mgmt_pending_remove(cmd);
3688 	}
3689 
3690 	hci_dev_unlock(hdev);
3691 }
3692 
/* Handler for MGMT_OP_STOP_DISCOVERY.
 *
 * Checks that discovery is active and that the requested type matches
 * the one currently running, then schedules the stop through the
 * discov_update work item; completion is reported asynchronously via
 * mgmt_stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The type must match the discovery that was started. */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = generic_cmd_complete;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3734 
/* Handler for MGMT_OP_CONFIRM_NAME.
 *
 * During discovery, user space confirms whether a device's name is
 * already known.  Known names are taken off the name-resolve list;
 * unknown ones are re-queued for remote name resolution.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Name confirmation only makes sense while discovery runs. */
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		/* Name already known: no resolution needed, drop the
		 * entry from the resolve list.
		 */
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3776 
3777 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3778 			u16 len)
3779 {
3780 	struct mgmt_cp_block_device *cp = data;
3781 	u8 status;
3782 	int err;
3783 
3784 	BT_DBG("%s", hdev->name);
3785 
3786 	if (!bdaddr_type_is_valid(cp->addr.type))
3787 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3788 					 MGMT_STATUS_INVALID_PARAMS,
3789 					 &cp->addr, sizeof(cp->addr));
3790 
3791 	hci_dev_lock(hdev);
3792 
3793 	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
3794 				  cp->addr.type);
3795 	if (err < 0) {
3796 		status = MGMT_STATUS_FAILED;
3797 		goto done;
3798 	}
3799 
3800 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3801 		   sk);
3802 	status = MGMT_STATUS_SUCCESS;
3803 
3804 done:
3805 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3806 				&cp->addr, sizeof(cp->addr));
3807 
3808 	hci_dev_unlock(hdev);
3809 
3810 	return err;
3811 }
3812 
3813 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3814 			  u16 len)
3815 {
3816 	struct mgmt_cp_unblock_device *cp = data;
3817 	u8 status;
3818 	int err;
3819 
3820 	BT_DBG("%s", hdev->name);
3821 
3822 	if (!bdaddr_type_is_valid(cp->addr.type))
3823 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3824 					 MGMT_STATUS_INVALID_PARAMS,
3825 					 &cp->addr, sizeof(cp->addr));
3826 
3827 	hci_dev_lock(hdev);
3828 
3829 	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
3830 				  cp->addr.type);
3831 	if (err < 0) {
3832 		status = MGMT_STATUS_INVALID_PARAMS;
3833 		goto done;
3834 	}
3835 
3836 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3837 		   sk);
3838 	status = MGMT_STATUS_SUCCESS;
3839 
3840 done:
3841 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3842 				&cp->addr, sizeof(cp->addr));
3843 
3844 	hci_dev_unlock(hdev);
3845 
3846 	return err;
3847 }
3848 
3849 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3850 			 u16 len)
3851 {
3852 	struct mgmt_cp_set_device_id *cp = data;
3853 	struct hci_request req;
3854 	int err;
3855 	__u16 source;
3856 
3857 	BT_DBG("%s", hdev->name);
3858 
3859 	source = __le16_to_cpu(cp->source);
3860 
3861 	if (source > 0x0002)
3862 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3863 				       MGMT_STATUS_INVALID_PARAMS);
3864 
3865 	hci_dev_lock(hdev);
3866 
3867 	hdev->devid_source = source;
3868 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3869 	hdev->devid_product = __le16_to_cpu(cp->product);
3870 	hdev->devid_version = __le16_to_cpu(cp->version);
3871 
3872 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
3873 				NULL, 0);
3874 
3875 	hci_req_init(&req, hdev);
3876 	__hci_req_update_eir(&req);
3877 	hci_req_run(&req, NULL);
3878 
3879 	hci_dev_unlock(hdev);
3880 
3881 	return err;
3882 }
3883 
/* Request-completion callback used when re-enabling instance
 * advertising; the result is only logged here.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	BT_DBG("status %d", status);
}
3889 
/* HCI request callback for Set Advertising.
 *
 * Synchronizes the HCI_ADVERTISING flag with the controller state,
 * answers all pending Set Advertising commands and, when the setting
 * was just turned off while advertising instances exist, re-enables
 * multi-instance advertising.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	u8 instance;
	struct adv_info *adv_instance;
	int err;

	hci_dev_lock(hdev);

	/* On HCI failure just report the error to all waiters. */
	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the controller's actual advertising state. */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		goto unlock;

	/* Pick the current instance, falling back to the first one. */
	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			goto unlock;

		instance = adv_instance->instance;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, instance, true);

	if (!err)
		err = hci_req_run(&req, enable_advertising_instance);

	if (err)
		bt_dev_err(hdev, "failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}
3952 
/* Handler for MGMT_OP_SET_ADVERTISING.
 *
 * Value 0x00 disables advertising, 0x01 enables it, and 0x02 enables
 * it in connectable mode.  When no HCI communication is needed (e.g.
 * powered off, or the flags already match) the flags are toggled and
 * the reply is sent directly; otherwise an HCI request is queued and
 * completion happens in set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast New Settings if a flag actually changed. */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Serialize against in-flight Set Advertising / Set LE commands. */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;
		__hci_req_update_adv_data(&req, 0x00);
		__hci_req_update_scan_rsp_data(&req, 0x00);
		__hci_req_enable_advertising(&req);
	} else {
		__hci_req_disable_advertising(&req);
	}

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4055 
4056 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4057 			      void *data, u16 len)
4058 {
4059 	struct mgmt_cp_set_static_address *cp = data;
4060 	int err;
4061 
4062 	BT_DBG("%s", hdev->name);
4063 
4064 	if (!lmp_le_capable(hdev))
4065 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4066 				       MGMT_STATUS_NOT_SUPPORTED);
4067 
4068 	if (hdev_is_powered(hdev))
4069 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4070 				       MGMT_STATUS_REJECTED);
4071 
4072 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4073 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4074 			return mgmt_cmd_status(sk, hdev->id,
4075 					       MGMT_OP_SET_STATIC_ADDRESS,
4076 					       MGMT_STATUS_INVALID_PARAMS);
4077 
4078 		/* Two most significant bits shall be set */
4079 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4080 			return mgmt_cmd_status(sk, hdev->id,
4081 					       MGMT_OP_SET_STATIC_ADDRESS,
4082 					       MGMT_STATUS_INVALID_PARAMS);
4083 	}
4084 
4085 	hci_dev_lock(hdev);
4086 
4087 	bacpy(&hdev->static_addr, &cp->bdaddr);
4088 
4089 	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
4090 	if (err < 0)
4091 		goto unlock;
4092 
4093 	err = new_settings(hdev, sk);
4094 
4095 unlock:
4096 	hci_dev_unlock(hdev);
4097 	return err;
4098 }
4099 
4100 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4101 			   void *data, u16 len)
4102 {
4103 	struct mgmt_cp_set_scan_params *cp = data;
4104 	__u16 interval, window;
4105 	int err;
4106 
4107 	BT_DBG("%s", hdev->name);
4108 
4109 	if (!lmp_le_capable(hdev))
4110 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4111 				       MGMT_STATUS_NOT_SUPPORTED);
4112 
4113 	interval = __le16_to_cpu(cp->interval);
4114 
4115 	if (interval < 0x0004 || interval > 0x4000)
4116 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4117 				       MGMT_STATUS_INVALID_PARAMS);
4118 
4119 	window = __le16_to_cpu(cp->window);
4120 
4121 	if (window < 0x0004 || window > 0x4000)
4122 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4123 				       MGMT_STATUS_INVALID_PARAMS);
4124 
4125 	if (window > interval)
4126 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4127 				       MGMT_STATUS_INVALID_PARAMS);
4128 
4129 	hci_dev_lock(hdev);
4130 
4131 	hdev->le_scan_interval = interval;
4132 	hdev->le_scan_window = window;
4133 
4134 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
4135 				NULL, 0);
4136 
4137 	/* If background scan is running, restart it so new parameters are
4138 	 * loaded.
4139 	 */
4140 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4141 	    hdev->discovery.state == DISCOVERY_STOPPED) {
4142 		struct hci_request req;
4143 
4144 		hci_req_init(&req, hdev);
4145 
4146 		hci_req_add_le_scan_disable(&req);
4147 		hci_req_add_le_passive_scan(&req);
4148 
4149 		hci_req_run(&req, NULL);
4150 	}
4151 
4152 	hci_dev_unlock(hdev);
4153 
4154 	return err;
4155 }
4156 
/* HCI request callback for Set Fast Connectable.
 *
 * On success updates the HCI_FAST_CONNECTABLE flag to match the
 * requested value and notifies the sender plus other mgmt sockets;
 * on failure only an error status is returned to the sender.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			        mgmt_status(status));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4190 
/* Handler for MGMT_OP_SET_FAST_CONNECTABLE.
 *
 * Requires BR/EDR to be enabled and a controller of at least
 * Bluetooth 1.2.  When powered, the page scan parameters are written
 * via an HCI request and fast_connectable_complete() finishes the
 * command; when powered off only the flag is toggled.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to do if the flag already has the requested value. */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* While powered off, just toggle the flag; the parameters are
	 * written to the controller on power-on.
	 */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4255 
/* HCI request callback for Set BR/EDR.
 *
 * On failure the HCI_BREDR_ENABLED flag (set optimistically in
 * set_bredr()) is cleared again before reporting the error; on
 * success the new settings are sent to the sender and broadcast.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4287 
/* Handler for MGMT_OP_SET_BREDR.
 *
 * Enables or disables BR/EDR on a dual-mode controller.  Disabling is
 * only possible while powered off; when powered, only enabling is
 * allowed and an HCI request updates fast-connectable, scan and
 * advertising data, finishing in set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Only meaningful on dual-mode (BR/EDR + LE) controllers. */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Nothing to do if the flag already has the requested value. */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all BR/EDR-only settings. */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, false);
	__hci_req_update_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	__hci_req_update_adv_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4399 
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	/* Completion handler for the HCI request issued by
	 * set_secure_conn(). Nothing to do if the pending command has
	 * already gone away.
	 */
	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		/* Controller rejected the change; report the translated
		 * failure back to the requesting socket.
		 */
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
			        mgmt_status(status));
		goto remove;
	}

	cp = cmd->param;

	/* Mirror the requested mode in the device flags:
	 * 0x00 = disabled, 0x01 = SC enabled, 0x02 = SC-only mode.
	 */
	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}
4444 
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Secure Connections requires either controller-side SC support
	 * (BR/EDR) or an enabled LE host side.
	 */
	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On BR/EDR-enabled SC-capable controllers SSP must be on first */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	/* 0x00 = off, 0x01 = enabled, 0x02 = SC-only mode */
	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Without power, controller SC support or BR/EDR enabled only the
	 * host flags are updated; no HCI command needs to be sent.
	 */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* No change requested; just reply with the current settings */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Flags are updated in sc_enable_complete() once the controller
	 * has acknowledged the change.
	 */
	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
4532 
4533 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4534 			  void *data, u16 len)
4535 {
4536 	struct mgmt_mode *cp = data;
4537 	bool changed, use_changed;
4538 	int err;
4539 
4540 	BT_DBG("request for %s", hdev->name);
4541 
4542 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4543 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4544 				       MGMT_STATUS_INVALID_PARAMS);
4545 
4546 	hci_dev_lock(hdev);
4547 
4548 	if (cp->val)
4549 		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
4550 	else
4551 		changed = hci_dev_test_and_clear_flag(hdev,
4552 						      HCI_KEEP_DEBUG_KEYS);
4553 
4554 	if (cp->val == 0x02)
4555 		use_changed = !hci_dev_test_and_set_flag(hdev,
4556 							 HCI_USE_DEBUG_KEYS);
4557 	else
4558 		use_changed = hci_dev_test_and_clear_flag(hdev,
4559 							  HCI_USE_DEBUG_KEYS);
4560 
4561 	if (hdev_is_powered(hdev) && use_changed &&
4562 	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
4563 		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4564 		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4565 			     sizeof(mode), &mode);
4566 	}
4567 
4568 	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4569 	if (err < 0)
4570 		goto unlock;
4571 
4572 	if (changed)
4573 		err = new_settings(hdev, sk);
4574 
4575 unlock:
4576 	hci_dev_unlock(hdev);
4577 	return err;
4578 }
4579 
4580 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4581 		       u16 len)
4582 {
4583 	struct mgmt_cp_set_privacy *cp = cp_data;
4584 	bool changed;
4585 	int err;
4586 
4587 	BT_DBG("request for %s", hdev->name);
4588 
4589 	if (!lmp_le_capable(hdev))
4590 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4591 				       MGMT_STATUS_NOT_SUPPORTED);
4592 
4593 	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
4594 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4595 				       MGMT_STATUS_INVALID_PARAMS);
4596 
4597 	if (hdev_is_powered(hdev))
4598 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4599 				       MGMT_STATUS_REJECTED);
4600 
4601 	hci_dev_lock(hdev);
4602 
4603 	/* If user space supports this command it is also expected to
4604 	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4605 	 */
4606 	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
4607 
4608 	if (cp->privacy) {
4609 		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
4610 		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4611 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
4612 		if (cp->privacy == 0x02)
4613 			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
4614 		else
4615 			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
4616 	} else {
4617 		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
4618 		memset(hdev->irk, 0, sizeof(hdev->irk));
4619 		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
4620 		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
4621 	}
4622 
4623 	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4624 	if (err < 0)
4625 		goto unlock;
4626 
4627 	if (changed)
4628 		err = new_settings(hdev, sk);
4629 
4630 unlock:
4631 	hci_dev_unlock(hdev);
4632 	return err;
4633 }
4634 
4635 static bool irk_is_valid(struct mgmt_irk_info *irk)
4636 {
4637 	switch (irk->addr.type) {
4638 	case BDADDR_LE_PUBLIC:
4639 		return true;
4640 
4641 	case BDADDR_LE_RANDOM:
4642 		/* Two most significant bits shall be set */
4643 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4644 			return false;
4645 		return true;
4646 	}
4647 
4648 	return false;
4649 }
4650 
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound keeps the expected_len computation below within u16 */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must exactly match the payload size */
	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	/* Validate every entry before touching the stored IRK list so
	 * the command is all-or-nothing.
	 */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* Replace the whole list with the provided set */
	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
4713 
4714 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4715 {
4716 	if (key->master != 0x00 && key->master != 0x01)
4717 		return false;
4718 
4719 	switch (key->addr.type) {
4720 	case BDADDR_LE_PUBLIC:
4721 		return true;
4722 
4723 	case BDADDR_LE_RANDOM:
4724 		/* Two most significant bits shall be set */
4725 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4726 			return false;
4727 		return true;
4728 	}
4729 
4730 	return false;
4731 }
4732 
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound keeps the expected_len computation below within u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must exactly match the payload size */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	/* Validate every entry before touching the stored LTK list so
	 * the command is all-or-nothing.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* fall through
			 * NOTE(review): falls into the default case, so
			 * debug keys (and unknown types) are skipped and
			 * never stored. This looks intentional but has no
			 * break/annotation, which -Wimplicit-fallthrough
			 * would flag - confirm against upstream intent.
			 */
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
4820 
4821 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
4822 {
4823 	struct hci_conn *conn = cmd->user_data;
4824 	struct mgmt_rp_get_conn_info rp;
4825 	int err;
4826 
4827 	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
4828 
4829 	if (status == MGMT_STATUS_SUCCESS) {
4830 		rp.rssi = conn->rssi;
4831 		rp.tx_power = conn->tx_power;
4832 		rp.max_tx_power = conn->max_tx_power;
4833 	} else {
4834 		rp.rssi = HCI_RSSI_INVALID;
4835 		rp.tx_power = HCI_TX_POWER_INVALID;
4836 		rp.max_tx_power = HCI_TX_POWER_INVALID;
4837 	}
4838 
4839 	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
4840 				status, &rp, sizeof(rp));
4841 
4842 	hci_conn_drop(conn);
4843 	hci_conn_put(conn);
4844 
4845 	return err;
4846 }
4847 
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	BT_DBG("status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle.  Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		/* Last command was Read TX Power; the RSSI read already
		 * succeeded, so report success regardless of hci_status.
		 */
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	if (!cp) {
		bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
			   handle);
		goto unlock;
	}

	/* Match the pending command by the connection it was queued for */
	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4901 
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Look up the connection on the transport matching the address type */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one refresh may be in flight per connection */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		/* Read RSSI is always first in the request; see
		 * conn_info_refresh_complete() for why the order matters.
		 */
		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* NOTE(review): the request is started before the pending
		 * command is added; presumably safe because hdev->lock is
		 * held until the unlock below and the completion handler
		 * takes it too - confirm.
		 */
		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* References released in conn_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5022 
5023 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5024 {
5025 	struct hci_conn *conn = cmd->user_data;
5026 	struct mgmt_rp_get_clock_info rp;
5027 	struct hci_dev *hdev;
5028 	int err;
5029 
5030 	memset(&rp, 0, sizeof(rp));
5031 	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5032 
5033 	if (status)
5034 		goto complete;
5035 
5036 	hdev = hci_dev_get(cmd->index);
5037 	if (hdev) {
5038 		rp.local_clock = cpu_to_le32(hdev->clock);
5039 		hci_dev_put(hdev);
5040 	}
5041 
5042 	if (conn) {
5043 		rp.piconet_clock = cpu_to_le32(conn->clock);
5044 		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5045 	}
5046 
5047 complete:
5048 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
5049 				sizeof(rp));
5050 
5051 	if (conn) {
5052 		hci_conn_drop(conn);
5053 		hci_conn_put(conn);
5054 	}
5055 
5056 	return err;
5057 }
5058 
5059 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5060 {
5061 	struct hci_cp_read_clock *hci_cp;
5062 	struct mgmt_pending_cmd *cmd;
5063 	struct hci_conn *conn;
5064 
5065 	BT_DBG("%s status %u", hdev->name, status);
5066 
5067 	hci_dev_lock(hdev);
5068 
5069 	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
5070 	if (!hci_cp)
5071 		goto unlock;
5072 
5073 	if (hci_cp->which) {
5074 		u16 handle = __le16_to_cpu(hci_cp->handle);
5075 		conn = hci_conn_hash_lookup_handle(hdev, handle);
5076 	} else {
5077 		conn = NULL;
5078 	}
5079 
5080 	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5081 	if (!cmd)
5082 		goto unlock;
5083 
5084 	cmd->cmd_complete(cmd, mgmt_status(status));
5085 	mgmt_pending_remove(cmd);
5086 
5087 unlock:
5088 	hci_dev_unlock(hdev);
5089 }
5090 
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Piconet clocks only exist for BR/EDR links */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-zero address requests the piconet clock of that
	 * connection in addition to the local clock.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First read the local clock (handle 0, which 0) */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* References released in clock_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5166 
5167 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5168 {
5169 	struct hci_conn *conn;
5170 
5171 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5172 	if (!conn)
5173 		return false;
5174 
5175 	if (conn->dst_type != type)
5176 		return false;
5177 
5178 	if (conn->state != BT_CONNECTED)
5179 		return false;
5180 
5181 	return true;
5182 }
5183 
/* This function requires the caller holds hdev->lock */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	/* Returns existing params if already present */
	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever action list (pend_le_conns or
	 * pend_le_reports) the params are currently on, then re-add
	 * according to the new mode below.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* An in-progress explicit connect takes precedence over
		 * passive reporting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}
5228 
5229 static void device_added(struct sock *sk, struct hci_dev *hdev,
5230 			 bdaddr_t *bdaddr, u8 type, u8 action)
5231 {
5232 	struct mgmt_ev_device_added ev;
5233 
5234 	bacpy(&ev.addr.bdaddr, bdaddr);
5235 	ev.addr.type = type;
5236 	ev.action = action;
5237 
5238 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5239 }
5240 
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	/* 0x00 = background scan, 0x01 = allow incoming/direct,
	 * 0x02 = auto-connect.
	 */
	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	/* BR/EDR devices go to the whitelist; LE devices get connection
	 * parameters further below.
	 */
	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	}

	hci_update_background_scan(hdev);

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5328 
5329 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5330 			   bdaddr_t *bdaddr, u8 type)
5331 {
5332 	struct mgmt_ev_device_removed ev;
5333 
5334 	bacpy(&ev.addr.bdaddr, bdaddr);
5335 	ev.addr.type = type;
5336 
5337 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5338 }
5339 
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* A non-zero address removes a single device; BDADDR_ANY removes
	 * all devices (handled in the else branch below).
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* BR/EDR devices live on the whitelist */
		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Params not created via Add Device cannot be removed here */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* Remove-all requires address type 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep params for in-progress explicit connects,
			 * just downgrade them to explicit-only.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
5468 
5469 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5470 			   u16 len)
5471 {
5472 	struct mgmt_cp_load_conn_param *cp = data;
5473 	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
5474 				     sizeof(struct mgmt_conn_param));
5475 	u16 param_count, expected_len;
5476 	int i;
5477 
5478 	if (!lmp_le_capable(hdev))
5479 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5480 				       MGMT_STATUS_NOT_SUPPORTED);
5481 
5482 	param_count = __le16_to_cpu(cp->param_count);
5483 	if (param_count > max_param_count) {
5484 		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
5485 			   param_count);
5486 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5487 				       MGMT_STATUS_INVALID_PARAMS);
5488 	}
5489 
5490 	expected_len = sizeof(*cp) + param_count *
5491 					sizeof(struct mgmt_conn_param);
5492 	if (expected_len != len) {
5493 		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
5494 			   expected_len, len);
5495 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5496 				       MGMT_STATUS_INVALID_PARAMS);
5497 	}
5498 
5499 	BT_DBG("%s param_count %u", hdev->name, param_count);
5500 
5501 	hci_dev_lock(hdev);
5502 
5503 	hci_conn_params_clear_disabled(hdev);
5504 
5505 	for (i = 0; i < param_count; i++) {
5506 		struct mgmt_conn_param *param = &cp->params[i];
5507 		struct hci_conn_params *hci_param;
5508 		u16 min, max, latency, timeout;
5509 		u8 addr_type;
5510 
5511 		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
5512 		       param->addr.type);
5513 
5514 		if (param->addr.type == BDADDR_LE_PUBLIC) {
5515 			addr_type = ADDR_LE_DEV_PUBLIC;
5516 		} else if (param->addr.type == BDADDR_LE_RANDOM) {
5517 			addr_type = ADDR_LE_DEV_RANDOM;
5518 		} else {
5519 			bt_dev_err(hdev, "ignoring invalid connection parameters");
5520 			continue;
5521 		}
5522 
5523 		min = le16_to_cpu(param->min_interval);
5524 		max = le16_to_cpu(param->max_interval);
5525 		latency = le16_to_cpu(param->latency);
5526 		timeout = le16_to_cpu(param->timeout);
5527 
5528 		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5529 		       min, max, latency, timeout);
5530 
5531 		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
5532 			bt_dev_err(hdev, "ignoring invalid connection parameters");
5533 			continue;
5534 		}
5535 
5536 		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
5537 						addr_type);
5538 		if (!hci_param) {
5539 			bt_dev_err(hdev, "failed to add connection parameters");
5540 			continue;
5541 		}
5542 
5543 		hci_param->conn_min_interval = min;
5544 		hci_param->conn_max_interval = max;
5545 		hci_param->conn_latency = latency;
5546 		hci_param->supervision_timeout = timeout;
5547 	}
5548 
5549 	hci_dev_unlock(hdev);
5550 
5551 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
5552 				 NULL, 0);
5553 }
5554 
5555 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
5556 			       void *data, u16 len)
5557 {
5558 	struct mgmt_cp_set_external_config *cp = data;
5559 	bool changed;
5560 	int err;
5561 
5562 	BT_DBG("%s", hdev->name);
5563 
5564 	if (hdev_is_powered(hdev))
5565 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5566 				       MGMT_STATUS_REJECTED);
5567 
5568 	if (cp->config != 0x00 && cp->config != 0x01)
5569 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5570 				         MGMT_STATUS_INVALID_PARAMS);
5571 
5572 	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
5573 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5574 				       MGMT_STATUS_NOT_SUPPORTED);
5575 
5576 	hci_dev_lock(hdev);
5577 
5578 	if (cp->config)
5579 		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
5580 	else
5581 		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
5582 
5583 	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
5584 	if (err < 0)
5585 		goto unlock;
5586 
5587 	if (!changed)
5588 		goto unlock;
5589 
5590 	err = new_options(hdev, sk);
5591 
5592 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
5593 		mgmt_index_removed(hdev);
5594 
5595 		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
5596 			hci_dev_set_flag(hdev, HCI_CONFIG);
5597 			hci_dev_set_flag(hdev, HCI_AUTO_OFF);
5598 
5599 			queue_work(hdev->req_workqueue, &hdev->power_on);
5600 		} else {
5601 			set_bit(HCI_RAW, &hdev->flags);
5602 			mgmt_index_added(hdev);
5603 		}
5604 	}
5605 
5606 unlock:
5607 	hci_dev_unlock(hdev);
5608 	return err;
5609 }
5610 
5611 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
5612 			      void *data, u16 len)
5613 {
5614 	struct mgmt_cp_set_public_address *cp = data;
5615 	bool changed;
5616 	int err;
5617 
5618 	BT_DBG("%s", hdev->name);
5619 
5620 	if (hdev_is_powered(hdev))
5621 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5622 				       MGMT_STATUS_REJECTED);
5623 
5624 	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
5625 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5626 				       MGMT_STATUS_INVALID_PARAMS);
5627 
5628 	if (!hdev->set_bdaddr)
5629 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5630 				       MGMT_STATUS_NOT_SUPPORTED);
5631 
5632 	hci_dev_lock(hdev);
5633 
5634 	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
5635 	bacpy(&hdev->public_addr, &cp->bdaddr);
5636 
5637 	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
5638 	if (err < 0)
5639 		goto unlock;
5640 
5641 	if (!changed)
5642 		goto unlock;
5643 
5644 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
5645 		err = new_options(hdev, sk);
5646 
5647 	if (is_configured(hdev)) {
5648 		mgmt_index_removed(hdev);
5649 
5650 		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
5651 
5652 		hci_dev_set_flag(hdev, HCI_CONFIG);
5653 		hci_dev_set_flag(hdev, HCI_AUTO_OFF);
5654 
5655 		queue_work(hdev->req_workqueue, &hdev->power_on);
5656 	}
5657 
5658 unlock:
5659 	hci_dev_unlock(hdev);
5660 	return err;
5661 }
5662 
5663 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
5664 					     u16 opcode, struct sk_buff *skb)
5665 {
5666 	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
5667 	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
5668 	u8 *h192, *r192, *h256, *r256;
5669 	struct mgmt_pending_cmd *cmd;
5670 	u16 eir_len;
5671 	int err;
5672 
5673 	BT_DBG("%s status %u", hdev->name, status);
5674 
5675 	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
5676 	if (!cmd)
5677 		return;
5678 
5679 	mgmt_cp = cmd->param;
5680 
5681 	if (status) {
5682 		status = mgmt_status(status);
5683 		eir_len = 0;
5684 
5685 		h192 = NULL;
5686 		r192 = NULL;
5687 		h256 = NULL;
5688 		r256 = NULL;
5689 	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
5690 		struct hci_rp_read_local_oob_data *rp;
5691 
5692 		if (skb->len != sizeof(*rp)) {
5693 			status = MGMT_STATUS_FAILED;
5694 			eir_len = 0;
5695 		} else {
5696 			status = MGMT_STATUS_SUCCESS;
5697 			rp = (void *)skb->data;
5698 
5699 			eir_len = 5 + 18 + 18;
5700 			h192 = rp->hash;
5701 			r192 = rp->rand;
5702 			h256 = NULL;
5703 			r256 = NULL;
5704 		}
5705 	} else {
5706 		struct hci_rp_read_local_oob_ext_data *rp;
5707 
5708 		if (skb->len != sizeof(*rp)) {
5709 			status = MGMT_STATUS_FAILED;
5710 			eir_len = 0;
5711 		} else {
5712 			status = MGMT_STATUS_SUCCESS;
5713 			rp = (void *)skb->data;
5714 
5715 			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5716 				eir_len = 5 + 18 + 18;
5717 				h192 = NULL;
5718 				r192 = NULL;
5719 			} else {
5720 				eir_len = 5 + 18 + 18 + 18 + 18;
5721 				h192 = rp->hash192;
5722 				r192 = rp->rand192;
5723 			}
5724 
5725 			h256 = rp->hash256;
5726 			r256 = rp->rand256;
5727 		}
5728 	}
5729 
5730 	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
5731 	if (!mgmt_rp)
5732 		goto done;
5733 
5734 	if (status)
5735 		goto send_rsp;
5736 
5737 	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
5738 				  hdev->dev_class, 3);
5739 
5740 	if (h192 && r192) {
5741 		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
5742 					  EIR_SSP_HASH_C192, h192, 16);
5743 		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
5744 					  EIR_SSP_RAND_R192, r192, 16);
5745 	}
5746 
5747 	if (h256 && r256) {
5748 		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
5749 					  EIR_SSP_HASH_C256, h256, 16);
5750 		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
5751 					  EIR_SSP_RAND_R256, r256, 16);
5752 	}
5753 
5754 send_rsp:
5755 	mgmt_rp->type = mgmt_cp->type;
5756 	mgmt_rp->eir_len = cpu_to_le16(eir_len);
5757 
5758 	err = mgmt_cmd_complete(cmd->sk, hdev->id,
5759 				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
5760 				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
5761 	if (err < 0 || status)
5762 		goto done;
5763 
5764 	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
5765 
5766 	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
5767 				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
5768 				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
5769 done:
5770 	kfree(mgmt_rp);
5771 	mgmt_pending_remove(cmd);
5772 }
5773 
5774 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
5775 				  struct mgmt_cp_read_local_oob_ext_data *cp)
5776 {
5777 	struct mgmt_pending_cmd *cmd;
5778 	struct hci_request req;
5779 	int err;
5780 
5781 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
5782 			       cp, sizeof(*cp));
5783 	if (!cmd)
5784 		return -ENOMEM;
5785 
5786 	hci_req_init(&req, hdev);
5787 
5788 	if (bredr_sc_enabled(hdev))
5789 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
5790 	else
5791 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
5792 
5793 	err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
5794 	if (err < 0) {
5795 		mgmt_pending_remove(cmd);
5796 		return err;
5797 	}
5798 
5799 	return 0;
5800 }
5801 
/* Handle the Read Local Out Of Band Extended Data command.
 *
 * First pass (before allocation) only determines status and the worst-case
 * EIR length so a right-sized reply can be allocated; the second pass, under
 * hdev lock, actually fills in the EIR fields. The BR/EDR + SSP case is
 * answered asynchronously via read_local_ssp_oob_req() and its completion
 * handler instead.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	BT_DBG("%s", hdev->name);

	/* Pass 1: derive status and the EIR buffer size for the reply */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5; /* Class of Device field */
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				/* bdaddr + role + SC confirm + SC random +
				 * flags fields (each incl. 2 byte header)
				 */
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Pass 2: build the actual EIR data */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* SSP OOB values must come from the controller;
			 * reply asynchronously from the request completion.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		/* Generate fresh LE SC confirm/random values if needed */
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick the address the peer will see: the static random
		 * address (marker byte 0x01) or the public one (0x00).
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* Role: 0x02 while advertising, 0x01 otherwise */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* Let this socket receive future OOB data update events */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
5957 
5958 static u32 get_supported_adv_flags(struct hci_dev *hdev)
5959 {
5960 	u32 flags = 0;
5961 
5962 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
5963 	flags |= MGMT_ADV_FLAG_DISCOV;
5964 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
5965 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
5966 	flags |= MGMT_ADV_FLAG_APPEARANCE;
5967 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
5968 
5969 	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID)
5970 		flags |= MGMT_ADV_FLAG_TX_POWER;
5971 
5972 	return flags;
5973 }
5974 
5975 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
5976 			     void *data, u16 data_len)
5977 {
5978 	struct mgmt_rp_read_adv_features *rp;
5979 	size_t rp_len;
5980 	int err;
5981 	struct adv_info *adv_instance;
5982 	u32 supported_flags;
5983 	u8 *instance;
5984 
5985 	BT_DBG("%s", hdev->name);
5986 
5987 	if (!lmp_le_capable(hdev))
5988 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
5989 				       MGMT_STATUS_REJECTED);
5990 
5991 	hci_dev_lock(hdev);
5992 
5993 	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
5994 	rp = kmalloc(rp_len, GFP_ATOMIC);
5995 	if (!rp) {
5996 		hci_dev_unlock(hdev);
5997 		return -ENOMEM;
5998 	}
5999 
6000 	supported_flags = get_supported_adv_flags(hdev);
6001 
6002 	rp->supported_flags = cpu_to_le32(supported_flags);
6003 	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
6004 	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
6005 	rp->max_instances = HCI_MAX_ADV_INSTANCES;
6006 	rp->num_instances = hdev->adv_instance_cnt;
6007 
6008 	instance = rp->instance;
6009 	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
6010 		*instance = adv_instance->instance;
6011 		instance++;
6012 	}
6013 
6014 	hci_dev_unlock(hdev);
6015 
6016 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
6017 				MGMT_STATUS_SUCCESS, rp, rp_len);
6018 
6019 	kfree(rp);
6020 
6021 	return err;
6022 }
6023 
6024 static u8 calculate_name_len(struct hci_dev *hdev)
6025 {
6026 	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
6027 
6028 	return append_local_name(hdev, buf, 0);
6029 }
6030 
6031 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
6032 			   bool is_adv_data)
6033 {
6034 	u8 max_len = HCI_MAX_AD_LENGTH;
6035 
6036 	if (is_adv_data) {
6037 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
6038 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
6039 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
6040 			max_len -= 3;
6041 
6042 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
6043 			max_len -= 3;
6044 	} else {
6045 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
6046 			max_len -= calculate_name_len(hdev);
6047 
6048 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
6049 			max_len -= 4;
6050 	}
6051 
6052 	return max_len;
6053 }
6054 
6055 static bool flags_managed(u32 adv_flags)
6056 {
6057 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
6058 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
6059 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
6060 }
6061 
6062 static bool tx_power_managed(u32 adv_flags)
6063 {
6064 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
6065 }
6066 
6067 static bool name_managed(u32 adv_flags)
6068 {
6069 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
6070 }
6071 
6072 static bool appearance_managed(u32 adv_flags)
6073 {
6074 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
6075 }
6076 
6077 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
6078 			      u8 len, bool is_adv_data)
6079 {
6080 	int i, cur_len;
6081 	u8 max_len;
6082 
6083 	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
6084 
6085 	if (len > max_len)
6086 		return false;
6087 
6088 	/* Make sure that the data is correctly formatted. */
6089 	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
6090 		cur_len = data[i];
6091 
6092 		if (data[i + 1] == EIR_FLAGS &&
6093 		    (!is_adv_data || flags_managed(adv_flags)))
6094 			return false;
6095 
6096 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
6097 			return false;
6098 
6099 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
6100 			return false;
6101 
6102 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
6103 			return false;
6104 
6105 		if (data[i + 1] == EIR_APPEARANCE &&
6106 		    appearance_managed(adv_flags))
6107 			return false;
6108 
6109 		/* If the current field length would exceed the total data
6110 		 * length, then it's invalid.
6111 		 */
6112 		if (i + cur_len >= len)
6113 			return false;
6114 	}
6115 
6116 	return true;
6117 }
6118 
/* HCI request completion for Add Advertising: on success commit all pending
 * instances, on failure roll them back, then answer the pending mgmt
 * command (if it is still around).
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_advertising *cp;
	struct mgmt_rp_add_advertising rp;
	struct adv_info *adv_instance, *n;
	u8 instance;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);

	/* Walk all instances that were added but not yet confirmed */
	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		if (!adv_instance->pending)
			continue;

		/* Success: the instance is now committed */
		if (!status) {
			adv_instance->pending = false;
			continue;
		}

		/* Failure: undo the provisional instance */
		instance = adv_instance->instance;

		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
	}

	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	if (status)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6170 
/* Handle the Add Advertising command: validate the request, store the
 * instance and, when possible, schedule it for immediate advertising.
 * The mgmt reply is either sent synchronously (nothing to do on the
 * controller) or from add_advertising_complete().
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u32 supported_flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Instance identifiers are 1-based */
	if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The variable part must match the declared adv/scan_rsp lengths */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	/* The current implementation only supports a subset of the specified
	 * flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	if (flags & ~supported_flags)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs a running controller to be enforced */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* Only one advertising/LE state change may be in flight at a time */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Scan response data follows the advertising data in cp->data */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);

	if (!err)
		err = hci_req_run(&req, add_advertising_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6311 
6312 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
6313 					u16 opcode)
6314 {
6315 	struct mgmt_pending_cmd *cmd;
6316 	struct mgmt_cp_remove_advertising *cp;
6317 	struct mgmt_rp_remove_advertising rp;
6318 
6319 	BT_DBG("status %d", status);
6320 
6321 	hci_dev_lock(hdev);
6322 
6323 	/* A failure status here only means that we failed to disable
6324 	 * advertising. Otherwise, the advertising instance has been removed,
6325 	 * so report success.
6326 	 */
6327 	cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
6328 	if (!cmd)
6329 		goto unlock;
6330 
6331 	cp = cmd->param;
6332 	rp.instance = cp->instance;
6333 
6334 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
6335 			  &rp, sizeof(rp));
6336 	mgmt_pending_remove(cmd);
6337 
6338 unlock:
6339 	hci_dev_unlock(hdev);
6340 }
6341 
/* Handle the Remove Advertising command: remove one instance (or all when
 * cp->instance == 0) and disable advertising on the controller when that
 * removed the last instance. Replies synchronously when no HCI traffic is
 * needed, otherwise from remove_advertising_complete().
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_rp_remove_advertising rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* A non-zero instance must refer to an existing one */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Only one advertising/LE state change may be in flight at a time */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to remove if no instances are configured */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);

	/* Turn advertising off if the last instance was just removed */
	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	/* If no HCI commands have been collected so far or the HCI_ADVERTISING
	 * flag is set or the device isn't powered then we have no HCI
	 * communication to make. Simply return.
	 */
	if (skb_queue_empty(&req.cmd_q) ||
	    !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		hci_req_purge(&req);
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_REMOVE_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_req_run(&req, remove_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6414 
6415 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
6416 			     void *data, u16 data_len)
6417 {
6418 	struct mgmt_cp_get_adv_size_info *cp = data;
6419 	struct mgmt_rp_get_adv_size_info rp;
6420 	u32 flags, supported_flags;
6421 	int err;
6422 
6423 	BT_DBG("%s", hdev->name);
6424 
6425 	if (!lmp_le_capable(hdev))
6426 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6427 				       MGMT_STATUS_REJECTED);
6428 
6429 	if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
6430 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6431 				       MGMT_STATUS_INVALID_PARAMS);
6432 
6433 	flags = __le32_to_cpu(cp->flags);
6434 
6435 	/* The current implementation only supports a subset of the specified
6436 	 * flags.
6437 	 */
6438 	supported_flags = get_supported_adv_flags(hdev);
6439 	if (flags & ~supported_flags)
6440 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6441 				       MGMT_STATUS_INVALID_PARAMS);
6442 
6443 	rp.instance = cp->instance;
6444 	rp.flags = cp->flags;
6445 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
6446 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
6447 
6448 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6449 				MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6450 
6451 	return err;
6452 }
6453 
/* Dispatch table for mgmt commands, indexed by MGMT_OP_* opcode value.
 * The position of each entry must match its opcode (starting at 0x0001),
 * which is why slot zero is an explicit NULL placeholder. Each entry
 * carries the handler, the expected parameter size (a minimum when
 * HCI_MGMT_VAR_LEN is set) and optional flags:
 *   HCI_MGMT_NO_HDEV      - command takes no controller index
 *   HCI_MGMT_UNTRUSTED    - permitted on untrusted sockets
 *   HCI_MGMT_UNCONFIGURED - permitted while controller is unconfigured
 *   HCI_MGMT_VAR_LEN      - parameters are variable length
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
};
6547 
6548 void mgmt_index_added(struct hci_dev *hdev)
6549 {
6550 	struct mgmt_ev_ext_index ev;
6551 
6552 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6553 		return;
6554 
6555 	switch (hdev->dev_type) {
6556 	case HCI_PRIMARY:
6557 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
6558 			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
6559 					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
6560 			ev.type = 0x01;
6561 		} else {
6562 			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
6563 					 HCI_MGMT_INDEX_EVENTS);
6564 			ev.type = 0x00;
6565 		}
6566 		break;
6567 	case HCI_AMP:
6568 		ev.type = 0x02;
6569 		break;
6570 	default:
6571 		return;
6572 	}
6573 
6574 	ev.bus = hdev->bus;
6575 
6576 	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
6577 			 HCI_MGMT_EXT_INDEX_EVENTS);
6578 }
6579 
6580 void mgmt_index_removed(struct hci_dev *hdev)
6581 {
6582 	struct mgmt_ev_ext_index ev;
6583 	u8 status = MGMT_STATUS_INVALID_INDEX;
6584 
6585 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
6586 		return;
6587 
6588 	switch (hdev->dev_type) {
6589 	case HCI_PRIMARY:
6590 		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6591 
6592 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
6593 			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
6594 					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
6595 			ev.type = 0x01;
6596 		} else {
6597 			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
6598 					 HCI_MGMT_INDEX_EVENTS);
6599 			ev.type = 0x00;
6600 		}
6601 		break;
6602 	case HCI_AMP:
6603 		ev.type = 0x02;
6604 		break;
6605 	default:
6606 		return;
6607 	}
6608 
6609 	ev.bus = hdev->bus;
6610 
6611 	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
6612 			 HCI_MGMT_EXT_INDEX_EVENTS);
6613 }
6614 
6615 /* This function requires the caller holds hdev->lock */
6616 static void restart_le_actions(struct hci_dev *hdev)
6617 {
6618 	struct hci_conn_params *p;
6619 
6620 	list_for_each_entry(p, &hdev->le_conn_params, list) {
6621 		/* Needed for AUTO_OFF case where might not "really"
6622 		 * have been powered off.
6623 		 */
6624 		list_del_init(&p->action);
6625 
6626 		switch (p->auto_connect) {
6627 		case HCI_AUTO_CONN_DIRECT:
6628 		case HCI_AUTO_CONN_ALWAYS:
6629 			list_add(&p->action, &hdev->pend_le_conns);
6630 			break;
6631 		case HCI_AUTO_CONN_REPORT:
6632 			list_add(&p->action, &hdev->pend_le_reports);
6633 			break;
6634 		default:
6635 			break;
6636 		}
6637 	}
6638 }
6639 
6640 void mgmt_power_on(struct hci_dev *hdev, int err)
6641 {
6642 	struct cmd_lookup match = { NULL, hdev };
6643 
6644 	BT_DBG("err %d", err);
6645 
6646 	hci_dev_lock(hdev);
6647 
6648 	if (!err) {
6649 		restart_le_actions(hdev);
6650 		hci_update_background_scan(hdev);
6651 	}
6652 
6653 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6654 
6655 	new_settings(hdev, match.sk);
6656 
6657 	if (match.sk)
6658 		sock_put(match.sk);
6659 
6660 	hci_dev_unlock(hdev);
6661 }
6662 
6663 void __mgmt_power_off(struct hci_dev *hdev)
6664 {
6665 	struct cmd_lookup match = { NULL, hdev };
6666 	u8 status, zero_cod[] = { 0, 0, 0 };
6667 
6668 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6669 
6670 	/* If the power off is because of hdev unregistration let
6671 	 * use the appropriate INVALID_INDEX status. Otherwise use
6672 	 * NOT_POWERED. We cover both scenarios here since later in
6673 	 * mgmt_index_removed() any hci_conn callbacks will have already
6674 	 * been triggered, potentially causing misleading DISCONNECTED
6675 	 * status responses.
6676 	 */
6677 	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
6678 		status = MGMT_STATUS_INVALID_INDEX;
6679 	else
6680 		status = MGMT_STATUS_NOT_POWERED;
6681 
6682 	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
6683 
6684 	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
6685 		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
6686 				   zero_cod, sizeof(zero_cod),
6687 				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
6688 		ext_info_changed(hdev, NULL);
6689 	}
6690 
6691 	new_settings(hdev, match.sk);
6692 
6693 	if (match.sk)
6694 		sock_put(match.sk);
6695 }
6696 
6697 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6698 {
6699 	struct mgmt_pending_cmd *cmd;
6700 	u8 status;
6701 
6702 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
6703 	if (!cmd)
6704 		return;
6705 
6706 	if (err == -ERFKILL)
6707 		status = MGMT_STATUS_RFKILLED;
6708 	else
6709 		status = MGMT_STATUS_FAILED;
6710 
6711 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6712 
6713 	mgmt_pending_remove(cmd);
6714 }
6715 
6716 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6717 		       bool persistent)
6718 {
6719 	struct mgmt_ev_new_link_key ev;
6720 
6721 	memset(&ev, 0, sizeof(ev));
6722 
6723 	ev.store_hint = persistent;
6724 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6725 	ev.key.addr.type = BDADDR_BREDR;
6726 	ev.key.type = key->type;
6727 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6728 	ev.key.pin_len = key->pin_len;
6729 
6730 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
6731 }
6732 
6733 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6734 {
6735 	switch (ltk->type) {
6736 	case SMP_LTK:
6737 	case SMP_LTK_SLAVE:
6738 		if (ltk->authenticated)
6739 			return MGMT_LTK_AUTHENTICATED;
6740 		return MGMT_LTK_UNAUTHENTICATED;
6741 	case SMP_LTK_P256:
6742 		if (ltk->authenticated)
6743 			return MGMT_LTK_P256_AUTH;
6744 		return MGMT_LTK_P256_UNAUTH;
6745 	case SMP_LTK_P256_DEBUG:
6746 		return MGMT_LTK_P256_DEBUG;
6747 	}
6748 
6749 	return MGMT_LTK_UNAUTHENTICATED;
6750 }
6751 
/* Emit a New Long Term Key event so userspace can decide whether to
 * persist the key. store_hint is forced to zero for peers whose
 * address cannot identify them across reconnects.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* Only legacy SMP_LTK keys get the master flag set */
	if (key->type == SMP_LTK)
		ev.key.master = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
6794 
6795 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
6796 {
6797 	struct mgmt_ev_new_irk ev;
6798 
6799 	memset(&ev, 0, sizeof(ev));
6800 
6801 	ev.store_hint = persistent;
6802 
6803 	bacpy(&ev.rpa, &irk->rpa);
6804 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6805 	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6806 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6807 
6808 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
6809 }
6810 
6811 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6812 		   bool persistent)
6813 {
6814 	struct mgmt_ev_new_csrk ev;
6815 
6816 	memset(&ev, 0, sizeof(ev));
6817 
6818 	/* Devices using resolvable or non-resolvable random addresses
6819 	 * without providing an identity resolving key don't require
6820 	 * to store signature resolving keys. Their addresses will change
6821 	 * the next time around.
6822 	 *
6823 	 * Only when a remote device provides an identity address
6824 	 * make sure the signature resolving key is stored. So allow
6825 	 * static random and public addresses here.
6826 	 */
6827 	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6828 	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6829 		ev.store_hint = 0x00;
6830 	else
6831 		ev.store_hint = persistent;
6832 
6833 	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6834 	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6835 	ev.key.type = csrk->type;
6836 	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6837 
6838 	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
6839 }
6840 
6841 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6842 			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6843 			 u16 max_interval, u16 latency, u16 timeout)
6844 {
6845 	struct mgmt_ev_new_conn_param ev;
6846 
6847 	if (!hci_is_identity_address(bdaddr, bdaddr_type))
6848 		return;
6849 
6850 	memset(&ev, 0, sizeof(ev));
6851 	bacpy(&ev.addr.bdaddr, bdaddr);
6852 	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6853 	ev.store_hint = store_hint;
6854 	ev.min_interval = cpu_to_le16(min_interval);
6855 	ev.max_interval = cpu_to_le16(max_interval);
6856 	ev.latency = cpu_to_le16(latency);
6857 	ev.timeout = cpu_to_le16(timeout);
6858 
6859 	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
6860 }
6861 
/* Emit a Device Connected event for @conn.
 *
 * The event carries an EIR blob: for LE connections the advertising
 * data cached on the connection is copied verbatim; otherwise the
 * remote name (if any) and a non-zero class of device are appended as
 * EIR fields. NOTE(review): ev is built directly in a 512-byte stack
 * buffer; le_adv_data_len and name_len are assumed to fit — confirm
 * against their producers.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only append the class of device when it is non-zero */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}
6898 
6899 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
6900 {
6901 	struct sock **sk = data;
6902 
6903 	cmd->cmd_complete(cmd, 0);
6904 
6905 	*sk = cmd->sk;
6906 	sock_hold(*sk);
6907 
6908 	mgmt_pending_remove(cmd);
6909 }
6910 
6911 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
6912 {
6913 	struct hci_dev *hdev = data;
6914 	struct mgmt_cp_unpair_device *cp = cmd->param;
6915 
6916 	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6917 
6918 	cmd->cmd_complete(cmd, 0);
6919 	mgmt_pending_remove(cmd);
6920 }
6921 
6922 bool mgmt_powering_down(struct hci_dev *hdev)
6923 {
6924 	struct mgmt_pending_cmd *cmd;
6925 	struct mgmt_mode *cp;
6926 
6927 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
6928 	if (!cmd)
6929 		return false;
6930 
6931 	cp = cmd->param;
6932 	if (!cp->val)
6933 		return true;
6934 
6935 	return false;
6936 }
6937 
/* Emit a Device Disconnected event and complete any pending
 * Disconnect / Unpair Device commands for this controller. If a
 * power-off is pending and this is the last connection, the queued
 * power-off work is kicked immediately.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* No event if mgmt never saw this connection as connected */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp stores the requester's socket (referenced) in sk */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* NOTE(review): the sk argument appears to exclude the requester
	 * from the broadcast since it gets the command response instead —
	 * confirm against mgmt_event()'s skip semantics.
	 */
	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
6973 
6974 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6975 			    u8 link_type, u8 addr_type, u8 status)
6976 {
6977 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6978 	struct mgmt_cp_disconnect *cp;
6979 	struct mgmt_pending_cmd *cmd;
6980 
6981 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6982 			     hdev);
6983 
6984 	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
6985 	if (!cmd)
6986 		return;
6987 
6988 	cp = cmd->param;
6989 
6990 	if (bacmp(bdaddr, &cp->addr.bdaddr))
6991 		return;
6992 
6993 	if (cp->addr.type != bdaddr_type)
6994 		return;
6995 
6996 	cmd->cmd_complete(cmd, mgmt_status(status));
6997 	mgmt_pending_remove(cmd);
6998 }
6999 
7000 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7001 			 u8 addr_type, u8 status)
7002 {
7003 	struct mgmt_ev_connect_failed ev;
7004 
7005 	/* The connection is still in hci_conn_hash so test for 1
7006 	 * instead of 0 to know if this is the last one.
7007 	 */
7008 	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
7009 		cancel_delayed_work(&hdev->power_off);
7010 		queue_work(hdev->req_workqueue, &hdev->power_off.work);
7011 	}
7012 
7013 	bacpy(&ev.addr.bdaddr, bdaddr);
7014 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7015 	ev.status = mgmt_status(status);
7016 
7017 	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
7018 }
7019 
7020 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
7021 {
7022 	struct mgmt_ev_pin_code_request ev;
7023 
7024 	bacpy(&ev.addr.bdaddr, bdaddr);
7025 	ev.addr.type = BDADDR_BREDR;
7026 	ev.secure = secure;
7027 
7028 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
7029 }
7030 
7031 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7032 				  u8 status)
7033 {
7034 	struct mgmt_pending_cmd *cmd;
7035 
7036 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
7037 	if (!cmd)
7038 		return;
7039 
7040 	cmd->cmd_complete(cmd, mgmt_status(status));
7041 	mgmt_pending_remove(cmd);
7042 }
7043 
7044 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7045 				      u8 status)
7046 {
7047 	struct mgmt_pending_cmd *cmd;
7048 
7049 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
7050 	if (!cmd)
7051 		return;
7052 
7053 	cmd->cmd_complete(cmd, mgmt_status(status));
7054 	mgmt_pending_remove(cmd);
7055 }
7056 
7057 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7058 			      u8 link_type, u8 addr_type, u32 value,
7059 			      u8 confirm_hint)
7060 {
7061 	struct mgmt_ev_user_confirm_request ev;
7062 
7063 	BT_DBG("%s", hdev->name);
7064 
7065 	bacpy(&ev.addr.bdaddr, bdaddr);
7066 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7067 	ev.confirm_hint = confirm_hint;
7068 	ev.value = cpu_to_le32(value);
7069 
7070 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
7071 			  NULL);
7072 }
7073 
7074 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7075 			      u8 link_type, u8 addr_type)
7076 {
7077 	struct mgmt_ev_user_passkey_request ev;
7078 
7079 	BT_DBG("%s", hdev->name);
7080 
7081 	bacpy(&ev.addr.bdaddr, bdaddr);
7082 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7083 
7084 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
7085 			  NULL);
7086 }
7087 
7088 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7089 				      u8 link_type, u8 addr_type, u8 status,
7090 				      u8 opcode)
7091 {
7092 	struct mgmt_pending_cmd *cmd;
7093 
7094 	cmd = pending_find(opcode, hdev);
7095 	if (!cmd)
7096 		return -ENOENT;
7097 
7098 	cmd->cmd_complete(cmd, mgmt_status(status));
7099 	mgmt_pending_remove(cmd);
7100 
7101 	return 0;
7102 }
7103 
7104 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7105 				     u8 link_type, u8 addr_type, u8 status)
7106 {
7107 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7108 					  status, MGMT_OP_USER_CONFIRM_REPLY);
7109 }
7110 
7111 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7112 					 u8 link_type, u8 addr_type, u8 status)
7113 {
7114 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7115 					  status,
7116 					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
7117 }
7118 
7119 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7120 				     u8 link_type, u8 addr_type, u8 status)
7121 {
7122 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7123 					  status, MGMT_OP_USER_PASSKEY_REPLY);
7124 }
7125 
7126 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7127 					 u8 link_type, u8 addr_type, u8 status)
7128 {
7129 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7130 					  status,
7131 					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
7132 }
7133 
7134 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
7135 			     u8 link_type, u8 addr_type, u32 passkey,
7136 			     u8 entered)
7137 {
7138 	struct mgmt_ev_passkey_notify ev;
7139 
7140 	BT_DBG("%s", hdev->name);
7141 
7142 	bacpy(&ev.addr.bdaddr, bdaddr);
7143 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7144 	ev.passkey = __cpu_to_le32(passkey);
7145 	ev.entered = entered;
7146 
7147 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
7148 }
7149 
7150 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
7151 {
7152 	struct mgmt_ev_auth_failed ev;
7153 	struct mgmt_pending_cmd *cmd;
7154 	u8 status = mgmt_status(hci_status);
7155 
7156 	bacpy(&ev.addr.bdaddr, &conn->dst);
7157 	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7158 	ev.status = status;
7159 
7160 	cmd = find_pairing(conn);
7161 
7162 	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
7163 		    cmd ? cmd->sk : NULL);
7164 
7165 	if (cmd) {
7166 		cmd->cmd_complete(cmd, status);
7167 		mgmt_pending_remove(cmd);
7168 	}
7169 }
7170 
/* Handle completion of an HCI authentication-enable change: sync the
 * HCI_LINK_SECURITY flag with the controller's HCI_AUTH state, answer
 * pending Set Link Security commands and broadcast new settings when
 * the flag actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		/* On failure just fail all pending Set Link Security
		 * commands with the mapped status.
		 */
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's auth state into the mgmt flag; the
	 * test-and-set/clear result tells us whether anything changed.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
7197 
7198 static void clear_eir(struct hci_request *req)
7199 {
7200 	struct hci_dev *hdev = req->hdev;
7201 	struct hci_cp_write_eir cp;
7202 
7203 	if (!lmp_ext_inq_capable(hdev))
7204 		return;
7205 
7206 	memset(hdev->eir, 0, sizeof(hdev->eir));
7207 
7208 	memset(&cp, 0, sizeof(cp));
7209 
7210 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
7211 }
7212 
/* Handle completion of an HCI SSP mode change: keep the SSP and HS
 * mgmt flags consistent with the outcome, answer pending Set SSP
 * commands, and update (or clear) the EIR to match the new SSP state.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* A failed enable that had already set the flag must
		 * roll it back (HS depends on SSP, so it goes too) and
		 * tell listeners about the reverted settings.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		/* Disabling SSP also disables HS; "changed" is true if
		 * either flag was actually cleared.
		 */
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Refresh the EIR while SSP is on (re-sending the debug-key
	 * mode first when enabled); clear it when SSP is off.
	 */
	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		__hci_req_update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
7265 
7266 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
7267 {
7268 	struct cmd_lookup *match = data;
7269 
7270 	if (match->sk == NULL) {
7271 		match->sk = cmd->sk;
7272 		sock_hold(match->sk);
7273 	}
7274 }
7275 
7276 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
7277 				    u8 status)
7278 {
7279 	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
7280 
7281 	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
7282 	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
7283 	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
7284 
7285 	if (!status) {
7286 		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
7287 				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
7288 		ext_info_changed(hdev, NULL);
7289 	}
7290 
7291 	if (match.sk)
7292 		sock_put(match.sk);
7293 }
7294 
/* Handle completion of an HCI local-name change, broadcasting a Local
 * Name Changed event (except during power-on, where the write merely
 * restores the stored name).
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No pending command means the change did not come via
		 * mgmt; remember the controller-provided name.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
7322 
7323 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7324 {
7325 	int i;
7326 
7327 	for (i = 0; i < uuid_count; i++) {
7328 		if (!memcmp(uuid, uuids[i], 16))
7329 			return true;
7330 	}
7331 
7332 	return false;
7333 }
7334 
/* Scan an EIR/advertising data blob for any 16-, 32- or 128-bit
 * service UUID that matches one of the @uuid_count filter entries in
 * @uuids. Shorter UUIDs are expanded to 128 bits by patching the
 * relevant bytes of the Bluetooth base UUID before comparing.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero length field terminates the significant part */
		if (field_len == 0)
			break;

		/* Stop on a field that claims more data than remains */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* 16-bit UUIDs map onto bytes 12-13 of the base
			 * UUID (little-endian in the EIR data).
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 32-bit UUIDs map onto bytes 12-15 of the base
			 * UUID.
			 */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* 128-bit UUIDs are compared as-is */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length byte + payload) */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
7389 
/* Queue a delayed restart of an ongoing LE discovery scan. Used with
 * controllers whose strict duplicate filtering suppresses updated RSSI
 * values (see is_filter_match()).
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Skip the restart when it would only take effect after the
	 * scheduled end of the current scan window.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
7404 
/* Apply the service-discovery result filter (RSSI threshold and UUID
 * list) to a found device. Returns false if the result should be
 * dropped. May queue an LE scan restart for controllers with strict
 * duplicate filtering so that fresh RSSI values get reported.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
7449 
/* Emit a Device Found event for an inquiry/advertising result,
 * applying discovery filtering (service filter, limited-discovery
 * flag) first. The event's EIR blob is assembled from @eir, an
 * optional class-of-device field and the scan response data.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			/* LE case: check the advertised Flags field */
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Append the class of device only if the EIR does not already
	 * carry one.
	 */
	if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
				       NULL))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
7531 
7532 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7533 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
7534 {
7535 	struct mgmt_ev_device_found *ev;
7536 	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
7537 	u16 eir_len;
7538 
7539 	ev = (struct mgmt_ev_device_found *) buf;
7540 
7541 	memset(buf, 0, sizeof(buf));
7542 
7543 	bacpy(&ev->addr.bdaddr, bdaddr);
7544 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
7545 	ev->rssi = rssi;
7546 
7547 	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7548 				  name_len);
7549 
7550 	ev->eir_len = cpu_to_le16(eir_len);
7551 
7552 	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
7553 }
7554 
7555 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7556 {
7557 	struct mgmt_ev_discovering ev;
7558 
7559 	BT_DBG("%s discovering %u", hdev->name, discovering);
7560 
7561 	memset(&ev, 0, sizeof(ev));
7562 	ev.type = hdev->discovery.type;
7563 	ev.discovering = discovering;
7564 
7565 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
7566 }
7567 
/* Registration record for the HCI control channel: routes management
 * commands to the mgmt_handlers table and initializes per-hdev state.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
7574 
/* Register the management interface's control channel with the HCI
 * socket layer. Returns 0 on success or a negative errno.
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
7579 
/* Unregister the control channel on module teardown, undoing mgmt_init(). */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
7584