xref: /linux/net/bluetooth/mgmt.c (revision de73b5a97bba1538f065e1e90d8eeac399db7510)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 
42 #define MGMT_VERSION	1
43 #define MGMT_REVISION	19
44 
/* Opcodes a fully trusted (HCI_SOCK_TRUSTED) management socket may issue.
 * Reported verbatim by MGMT_OP_READ_COMMANDS; keep in sync with the
 * command handler table.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
};
129 
/* Events delivered to fully trusted management sockets and reported by
 * MGMT_OP_READ_COMMANDS.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
};
172 
/* Subset of opcodes permitted on sockets without HCI_SOCK_TRUSTED;
 * read-only operations only.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
185 
/* Subset of events delivered to sockets without HCI_SOCK_TRUSTED. */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
};
202 
203 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
204 
205 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
206 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
207 
208 /* HCI to MGMT error code conversion table */
/* HCI to MGMT error code conversion table.
 *
 * Indexed directly by the HCI status code, so entry order is
 * load-bearing: entry N must correspond to HCI status 0xNN.
 * Do not insert, remove or reorder entries.
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
272 
273 static u8 mgmt_status(u8 hci_status)
274 {
275 	if (hci_status < ARRAY_SIZE(mgmt_status_table))
276 		return mgmt_status_table[hci_status];
277 
278 	return MGMT_STATUS_FAILED;
279 }
280 
/* Broadcast an index-related event on the control channel to all
 * sockets matching @flag; no socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
287 
/* Broadcast an event on the control channel to sockets matching @flag,
 * excluding @skip_sk (typically the command's originator).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
294 
/* Broadcast an event on the control channel to trusted sockets only,
 * excluding @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
301 
302 static u8 le_addr_type(u8 mgmt_addr_type)
303 {
304 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
305 		return ADDR_LE_DEV_PUBLIC;
306 	else
307 		return ADDR_LE_DEV_RANDOM;
308 }
309 
/* Fill a mgmt_rp_read_version reply with the interface version and
 * revision. @ver must point at a buffer of at least
 * sizeof(struct mgmt_rp_read_version) bytes.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
317 
/* MGMT_OP_READ_VERSION handler: reply with the mgmt interface version.
 * Index-less command, so hdev is only used for debug logging.
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
330 
331 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
332 			 u16 data_len)
333 {
334 	struct mgmt_rp_read_commands *rp;
335 	u16 num_commands, num_events;
336 	size_t rp_size;
337 	int i, err;
338 
339 	bt_dev_dbg(hdev, "sock %p", sk);
340 
341 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
342 		num_commands = ARRAY_SIZE(mgmt_commands);
343 		num_events = ARRAY_SIZE(mgmt_events);
344 	} else {
345 		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
346 		num_events = ARRAY_SIZE(mgmt_untrusted_events);
347 	}
348 
349 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
350 
351 	rp = kmalloc(rp_size, GFP_KERNEL);
352 	if (!rp)
353 		return -ENOMEM;
354 
355 	rp->num_commands = cpu_to_le16(num_commands);
356 	rp->num_events = cpu_to_le16(num_events);
357 
358 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
359 		__le16 *opcode = rp->opcodes;
360 
361 		for (i = 0; i < num_commands; i++, opcode++)
362 			put_unaligned_le16(mgmt_commands[i], opcode);
363 
364 		for (i = 0; i < num_events; i++, opcode++)
365 			put_unaligned_le16(mgmt_events[i], opcode);
366 	} else {
367 		__le16 *opcode = rp->opcodes;
368 
369 		for (i = 0; i < num_commands; i++, opcode++)
370 			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
371 
372 		for (i = 0; i < num_events; i++, opcode++)
373 			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
374 	}
375 
376 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
377 				rp, rp_size);
378 	kfree(rp);
379 
380 	return err;
381 }
382 
/* MGMT_OP_READ_INDEX_LIST handler: reply with the indexes of all
 * configured primary controllers.
 *
 * Two passes are made over hci_dev_list under one read_lock: the first
 * sizes the allocation, the second fills it. The second pass applies
 * extra filters (SETUP/CONFIG/USER_CHANNEL, raw-only quirk), so the
 * final count may be smaller than the allocation — rp_len is therefore
 * recomputed from the real count before replying.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of entries */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill indexes, skipping transitional devices */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute reply length from the actual (possibly smaller) count */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
442 
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: like read_index_list() but
 * lists only primary controllers that are still HCI_UNCONFIGURED.
 * Same count-then-fill pattern under one read_lock; see
 * read_index_list() for why rp_len is recomputed.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of entries */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill indexes, skipping transitional devices */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
502 
/* MGMT_OP_READ_EXT_INDEX_LIST handler: list both primary and AMP
 * controllers with a per-entry type (0x00 configured primary, 0x01
 * unconfigured primary, 0x02 AMP) and bus information.
 *
 * Side effect: issuing this command once switches the socket to
 * extended index events only.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of entries */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill entries, skipping transitional devices */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
576 
577 static bool is_configured(struct hci_dev *hdev)
578 {
579 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
580 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
581 		return false;
582 
583 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
584 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
585 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
586 		return false;
587 
588 	return true;
589 }
590 
591 static __le32 get_missing_options(struct hci_dev *hdev)
592 {
593 	u32 options = 0;
594 
595 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
596 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
597 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
598 
599 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
600 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
601 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
602 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
603 
604 	return cpu_to_le32(options);
605 }
606 
/* Broadcast MGMT_EV_NEW_CONFIG_OPTIONS with the current set of missing
 * options, skipping @skip (the socket that triggered the change).
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
614 
/* Complete @opcode on @sk with the current missing-options bitmask as
 * the reply payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
622 
/* MGMT_OP_READ_CONFIG_INFO handler: reply with the manufacturer ID plus
 * which configuration options the controller supports and which of them
 * are still missing.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Setting a public address is only possible if the driver
	 * provides a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
650 
/* Build the MGMT_PHY_* bitmask of PHYs the controller supports, based
 * on its LMP/LE feature bits. BR 1M 1-slot and LE 1M are implied by
 * BR/EDR and LE capability respectively; the rest are gated on the
 * corresponding feature bits.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		/* EDR 2M is the gate for all EDR PHYs; 3M additionally
		 * requires its own feature bit.
		 */
		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
702 
/* Build the MGMT_PHY_* bitmask of PHYs currently selected.
 *
 * For BR/EDR this is derived from hdev->pkt_type. Note the inverted
 * sense for EDR: HCI_2DHx/HCI_3DHx bits in pkt_type mark packet types
 * as *excluded*, so an EDR PHY is selected when its bit is clear.
 * For LE it is derived from the default TX/RX PHY preferences.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			/* EDR bits in pkt_type are exclusion flags */
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
765 
766 static u32 get_configurable_phys(struct hci_dev *hdev)
767 {
768 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
769 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
770 }
771 
/* Build the MGMT_SETTING_* bitmask of settings this controller can
 * support, based on its capabilities and quirks. This is the static
 * capability set; see get_current_settings() for what is enabled now.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Always available regardless of controller features */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable needs interlaced page scan (>= 1.2) */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;

		/* When the experimental feature for LL Privacy support is
		 * enabled, then advertising is no longer supported.
		 */
		if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			settings |= MGMT_SETTING_ADVERTISING;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
823 
/* Build the MGMT_SETTING_* bitmask of settings currently active on the
 * controller, mostly by mirroring the corresponding HCI dev flags.
 * Complements get_supported_settings().
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	return settings;
}
894 
/* Look up a pending mgmt command for @hdev by opcode on the control
 * channel; returns NULL if none is pending.
 */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
899 
/* Like pending_find() but additionally matches the command's user_data
 * pointer against @data.
 */
static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
}
906 
907 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
908 {
909 	struct mgmt_pending_cmd *cmd;
910 
911 	/* If there's a pending mgmt command the flags will not yet have
912 	 * their final values, so check for this first.
913 	 */
914 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
915 	if (cmd) {
916 		struct mgmt_mode *cp = cmd->param;
917 		if (cp->val == 0x01)
918 			return LE_AD_GENERAL;
919 		else if (cp->val == 0x02)
920 			return LE_AD_LIMITED;
921 	} else {
922 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
923 			return LE_AD_LIMITED;
924 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
925 			return LE_AD_GENERAL;
926 	}
927 
928 	return 0;
929 }
930 
931 bool mgmt_get_connectable(struct hci_dev *hdev)
932 {
933 	struct mgmt_pending_cmd *cmd;
934 
935 	/* If there's a pending mgmt command the flag will not yet have
936 	 * it's final value, so check for this first.
937 	 */
938 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
939 	if (cmd) {
940 		struct mgmt_mode *cp = cmd->param;
941 
942 		return cp->val;
943 	}
944 
945 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
946 }
947 
/* Delayed work: when the service cache window expires, clear
 * HCI_SERVICE_CACHE and push the now-current EIR data and class of
 * device to the controller. No-op if the flag was already clear.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	__hci_req_update_eir(&req);
	__hci_req_update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
968 
/* Delayed work: mark the resolvable private address as expired and, if
 * advertising is active, re-arm advertising so a fresh RPA gets
 * generated and programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Nothing else to do unless advertising is currently enabled */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	hci_req_init(&req, hdev);
	if (ext_adv_capable(hdev))
		__hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
	else
		__hci_req_enable_advertising(&req);
	hci_req_run(&req, NULL);
}
993 
/* One-time per-device mgmt initialization, performed the first time a
 * mgmt socket touches the device. test-and-set on HCI_MGMT makes this
 * idempotent.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
1009 
/* MGMT_OP_READ_INFO handler: reply with the controller's address, HCI
 * version, manufacturer, supported/current settings, class of device
 * and names. Snapshotted under hci_dev_lock for consistency.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1039 
/* Build the EIR payload shared by the extended-info reply and event:
 * class of device (BR/EDR only), appearance (LE only) and both names.
 * Returns the number of bytes written to @eir.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strlen(hdev->dev_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strlen(hdev->short_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1063 
/* MGMT_OP_READ_EXT_INFO handler: like READ_INFO but with a variable
 * length EIR blob instead of fixed class/name fields. Calling it also
 * switches the socket over to extended info events.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	/* 512 bytes comfortably holds the fixed header plus the EIR data
	 * produced by append_eir_data_to_buf() — presumably bounded by
	 * the dev_name/short_name field sizes; TODO confirm worst case.
	 */
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1103 
/* Emit MGMT_EV_EXT_INFO_CHANGED (current EIR blob) to every socket that
 * opted into extended info events, except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1119 
1120 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1121 {
1122 	__le32 settings = cpu_to_le32(get_current_settings(hdev));
1123 
1124 	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1125 				 sizeof(settings));
1126 }
1127 
/* HCI request completion callback for clean_up_hci_state(): once all
 * connections are gone, move the pending power-off work up to run
 * immediately instead of waiting for its timeout.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
1137 
1138 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1139 {
1140 	struct mgmt_ev_advertising_added ev;
1141 
1142 	ev.instance = instance;
1143 
1144 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1145 }
1146 
1147 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1148 			      u8 instance)
1149 {
1150 	struct mgmt_ev_advertising_removed ev;
1151 
1152 	ev.instance = instance;
1153 
1154 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1155 }
1156 
1157 static void cancel_adv_timeout(struct hci_dev *hdev)
1158 {
1159 	if (hdev->adv_instance_timeout) {
1160 		hdev->adv_instance_timeout = 0;
1161 		cancel_delayed_work(&hdev->adv_instance_expire);
1162 	}
1163 }
1164 
/* Quiesce the controller before powering off: disable page/inquiry
 * scan, remove advertising instances, stop discovery and abort every
 * connection, all batched into one HCI request. Returns the result of
 * hci_req_run() (-ENODATA if nothing needed to be sent).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* Drop all advertising instances (NULL instance = all) */
	hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(&req);

	discov_stopped = hci_req_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		/* 0x15 == Terminated due to Power Off */
		__hci_abort_conn(&req, conn, 0x15);
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1198 
/* MGMT_OP_SET_POWERED handler: power the controller on or off.
 * Power-on is deferred to the power_on work item; power-off first
 * cleans up HCI state and arms a delayed power_off as a safety net.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one SET_POWERED may be in flight at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just echo current settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1253 
1254 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1255 {
1256 	__le32 ev = cpu_to_le32(get_current_settings(hdev));
1257 
1258 	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1259 				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1260 }
1261 
/* Public wrapper: broadcast the current settings to all mgmt sockets */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1266 
/* Context passed through mgmt_pending_foreach() callbacks */
struct cmd_lookup {
	struct sock *sk;	/* first responder socket (held) */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1272 
/* mgmt_pending_foreach() callback: answer a pending command with the
 * current settings and remember the first socket in @match so the
 * caller can skip it when broadcasting new_settings().
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		/* Reference released by the caller via sock_put() */
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1288 
/* mgmt_pending_foreach() callback: fail a pending command with the
 * status pointed to by @data and remove it.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1296 
/* mgmt_pending_foreach() callback: complete a pending command via its
 * per-command cmd_complete handler when one is set, otherwise fall back
 * to a plain status response.
 */
static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	if (cmd->cmd_complete) {
		u8 *status = data;

		cmd->cmd_complete(cmd, *status);
		mgmt_pending_remove(cmd);

		return;
	}

	cmd_status_rsp(cmd, data);
}
1310 
/* cmd_complete handler: echo the command's original parameters back */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1316 
/* cmd_complete handler: reply with just the leading mgmt_addr_info of
 * the command parameters (address + type).
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1322 
1323 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1324 {
1325 	if (!lmp_bredr_capable(hdev))
1326 		return MGMT_STATUS_NOT_SUPPORTED;
1327 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1328 		return MGMT_STATUS_REJECTED;
1329 	else
1330 		return MGMT_STATUS_SUCCESS;
1331 }
1332 
1333 static u8 mgmt_le_support(struct hci_dev *hdev)
1334 {
1335 	if (!lmp_le_capable(hdev))
1336 		return MGMT_STATUS_NOT_SUPPORTED;
1337 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1338 		return MGMT_STATUS_REJECTED;
1339 	else
1340 		return MGMT_STATUS_SUCCESS;
1341 }
1342 
/* Completion handler for SET_DISCOVERABLE: on success arm the
 * discoverable timeout (if one was requested) and notify the caller
 * plus all other sockets; on failure report the error and clear the
 * limited-discoverable flag that may have been set optimistically.
 */
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1377 
/* MGMT_OP_SET_DISCOVERABLE handler. @cp->val: 0x00 off, 0x01 general
 * discoverable, 0x02 limited discoverable (requires a timeout). The
 * actual scan-mode change is done asynchronously by the
 * discoverable_update work; completion is reported through
 * mgmt_set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable implies connectable; reject otherwise */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	queue_work(hdev->req_workqueue, &hdev->discoverable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1507 
/* Completion handler for SET_CONNECTABLE: report success (settings
 * response + new_settings broadcast) or failure to the pending command
 * owner and remove the pending entry.
 */
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1535 
/* Flag-only path of SET_CONNECTABLE (used while powered off or when no
 * HCI traffic is needed): update the flags, answer the caller and, if
 * anything actually changed, refresh scanning and broadcast the new
 * settings. Disabling connectable also drops discoverable.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_req_update_scan(hdev);
		hci_update_background_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1564 
/* MGMT_OP_SET_CONNECTABLE handler: toggle page scan / connectable
 * advertising. When powered, the flags are updated here and the actual
 * scan-mode change is deferred to the connectable_update work item.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* While powered off only the stored flags need updating */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Turning connectable off also ends discoverable mode */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	queue_work(hdev->req_workqueue, &hdev->connectable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1621 
/* MGMT_OP_SET_BONDABLE handler: pure flag change, no HCI traffic of its
 * own, except that in limited privacy mode a bondable change may alter
 * the advertising address and therefore requeues discoverable_update.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		if (hdev_is_powered(hdev) &&
		    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
		    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
		    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1664 
/* MGMT_OP_SET_LINK_SECURITY handler: toggle BR/EDR authentication.
 * When powered, sends HCI Write Authentication Enable; the pending
 * command is completed from the HCI event path.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only change, no HCI command needed */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already matches the requested auth setting */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1733 
/* MGMT_OP_SET_SSP handler: toggle Secure Simple Pairing. Disabling SSP
 * also drops High Speed (which depends on it). When powered, sends HCI
 * Write Simple Pairing Mode; completion comes via the HCI event path.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only change; also clear HS when SSP goes off */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Turning SSP off also leaves debug-key mode (best effort) */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1814 
/* MGMT_OP_SET_HS handler: toggle the High Speed (AMP) setting. This is
 * a host-side flag only — no HCI command is sent. HS requires SSP, and
 * disabling it while powered is rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* An in-flight SET_SSP could invalidate the checks above */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1875 
/* HCI request completion for set_le(): answer all pending SET_LE
 * commands, broadcast the new settings and — when LE ended up enabled —
 * refresh the controller's default advertising/scan response data.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp() took a reference on the first responder */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;
		hci_req_init(&req, hdev);
		if (ext_adv_capable(hdev)) {
			int err;

			err = __hci_req_setup_ext_adv_instance(&req, 0x00);
			if (!err)
				__hci_req_update_scan_rsp_data(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
		}
		hci_req_run(&req, NULL);
		hci_update_background_scan(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}
1922 
/* MGMT_OP_SET_LE handler: enable or disable Low Energy support via HCI
 * Write LE Host Supported. LE-only controllers cannot have LE switched
 * off. Completion is reported through le_enable_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Turning LE off removes all advertising instances */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	/* Powered off or already in the requested host state:
	 * flag-only change, no HCI command needed.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop any active advertising before disabling LE */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			__hci_req_disable_advertising(&req);

		if (ext_adv_capable(hdev))
			__hci_req_clear_ext_adv_sets(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2029 
2030 /* This is a helper function to test for pending mgmt commands that can
2031  * cause CoD or EIR HCI commands. We can only allow one such pending
2032  * mgmt command at a time since otherwise we cannot easily track what
2033  * the current values are, will be, and based on that calculate if a new
2034  * HCI command needs to be sent and if yes with what value.
2035  */
/* Returns true if any pending mgmt command that may generate CoD or
 * EIR HCI traffic is currently in flight (see comment above).
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}
2052 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order. The last four bytes (offset 12..15) are the
 * slot where a shortened 16/32-bit UUID value would sit.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2057 
2058 static u8 get_uuid_size(const u8 *uuid)
2059 {
2060 	u32 val;
2061 
2062 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2063 		return 128;
2064 
2065 	val = get_unaligned_le32(&uuid[12]);
2066 	if (val > 0xffff)
2067 		return 32;
2068 
2069 	return 16;
2070 }
2071 
/* Shared completion helper for the class/EIR commands: reply to the
 * pending @mgmt_op with the (possibly updated) class of device.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2090 
/* HCI request completion callback for add_uuid() */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2097 
/* MGMT_OP_ADD_UUID handler: record a service UUID and push the
 * resulting class-of-device and EIR updates to the controller. If no
 * HCI traffic is needed (-ENODATA), completes immediately.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one EIR/class-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* Nothing to send to the controller: complete right away */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2155 
2156 static bool enable_service_cache(struct hci_dev *hdev)
2157 {
2158 	if (!hdev_is_powered(hdev))
2159 		return false;
2160 
2161 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2162 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2163 				   CACHE_TIMEOUT);
2164 		return true;
2165 	}
2166 
2167 	return false;
2168 }
2169 
/* hci_request completion callback for MGMT_OP_REMOVE_UUID */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2176 
/* MGMT_OP_REMOVE_UUID handler: remove matching service UUID entries
 * (an all-zero UUID acts as a wildcard removing everything) and then
 * refresh the controller's Class of Device and EIR data.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one EIR/class changing operation may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Wildcard: drop all UUIDs */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service-cache work was armed, the class/EIR
		 * update is deferred to it and we can reply right away.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	/* Remove every entry matching the given UUID */
	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		/* -ENODATA: nothing queued, reply immediately */
		if (err != -ENODATA)
			goto unlock;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	/* Defer the reply until the request completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2255 
/* hci_request completion callback for MGMT_OP_SET_DEV_CLASS */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2262 
/* MGMT_OP_SET_DEV_CLASS handler: set the major/minor Class of Device
 * and push the change to a powered BR/EDR capable controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one EIR/class changing operation may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Low two bits of minor and high three bits of major are reserved */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* When powered off just store the values; they are applied on
	 * power on.
	 */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* NOTE(review): the lock is dropped around the
		 * synchronous cancel, presumably so a running
		 * service_cache work item can finish without
		 * deadlocking — confirm against the work handler.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		__hci_req_update_eir(&req);
	}

	__hci_req_update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		/* -ENODATA: nothing queued, reply immediately */
		if (err != -ENODATA)
			goto unlock;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	/* Defer the reply until the request completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2333 
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the adapter's stored BR/EDR
 * link keys with the supplied list.
 *
 * The whole payload is validated (count bound, exact length, per-key
 * address type and key type) before any state is touched; blocked and
 * debug combination keys are skipped during the load.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Largest key_count that still fits in a u16-sized payload */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must match the declared key count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate every entry before clearing the existing keys */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Notify listeners if the keep-debug-keys setting flipped */
	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2422 
/* Emit MGMT_EV_DEVICE_UNPAIRED for the given address to all mgmt
 * sockets except skip_sk (typically the socket that requested the
 * unpair and already gets a command response).
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}
2434 
/* MGMT_OP_UNPAIR_DEVICE handler: remove the pairing information for a
 * BR/EDR or LE device, optionally terminating an existing link when
 * cp->disconnect is set.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	/* The reply always echoes the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Not connected: drop the connection parameters right away */
	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Terminating the link: defer the reply until disconnection */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2562 
/* MGMT_OP_DISCONNECT handler: terminate the BR/EDR or LE link to the
 * given address. The reply is deferred until the disconnection
 * completes (via the pending command).
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The reply always echoes the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be pending at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2628 
2629 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2630 {
2631 	switch (link_type) {
2632 	case LE_LINK:
2633 		switch (addr_type) {
2634 		case ADDR_LE_DEV_PUBLIC:
2635 			return BDADDR_LE_PUBLIC;
2636 
2637 		default:
2638 			/* Fallback to LE Random address type */
2639 			return BDADDR_LE_RANDOM;
2640 		}
2641 
2642 	default:
2643 		/* Fallback to BR/EDR type */
2644 		return BDADDR_BREDR;
2645 	}
2646 }
2647 
2648 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2649 			   u16 data_len)
2650 {
2651 	struct mgmt_rp_get_connections *rp;
2652 	struct hci_conn *c;
2653 	int err;
2654 	u16 i;
2655 
2656 	bt_dev_dbg(hdev, "sock %p", sk);
2657 
2658 	hci_dev_lock(hdev);
2659 
2660 	if (!hdev_is_powered(hdev)) {
2661 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2662 				      MGMT_STATUS_NOT_POWERED);
2663 		goto unlock;
2664 	}
2665 
2666 	i = 0;
2667 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2668 		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2669 			i++;
2670 	}
2671 
2672 	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
2673 	if (!rp) {
2674 		err = -ENOMEM;
2675 		goto unlock;
2676 	}
2677 
2678 	i = 0;
2679 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2680 		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2681 			continue;
2682 		bacpy(&rp->addr[i].bdaddr, &c->dst);
2683 		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2684 		if (c->type == SCO_LINK || c->type == ESCO_LINK)
2685 			continue;
2686 		i++;
2687 	}
2688 
2689 	rp->conn_count = cpu_to_le16(i);
2690 
2691 	/* Recalculate length in case of filtered SCO connections, etc */
2692 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2693 				struct_size(rp, addr, i));
2694 
2695 	kfree(rp);
2696 
2697 unlock:
2698 	hci_dev_unlock(hdev);
2699 	return err;
2700 }
2701 
/* Queue an HCI PIN Code Negative Reply for the given address and track
 * it as a pending MGMT_OP_PIN_CODE_NEG_REPLY command. Called with the
 * hdev lock held (see pin_code_reply()).
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2722 
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN code to
 * the controller. If high security is pending and the PIN is not the
 * full 16 bytes, a negative reply is sent instead.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* BT_SECURITY_HIGH requires a full 16 byte PIN; otherwise reject
	 * the pairing with a negative reply and fail the command.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2784 
2785 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2786 			     u16 len)
2787 {
2788 	struct mgmt_cp_set_io_capability *cp = data;
2789 
2790 	bt_dev_dbg(hdev, "sock %p", sk);
2791 
2792 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2793 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2794 				       MGMT_STATUS_INVALID_PARAMS);
2795 
2796 	hci_dev_lock(hdev);
2797 
2798 	hdev->io_capability = cp->io_capability;
2799 
2800 	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2801 
2802 	hci_dev_unlock(hdev);
2803 
2804 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
2805 				 NULL, 0);
2806 }
2807 
2808 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2809 {
2810 	struct hci_dev *hdev = conn->hdev;
2811 	struct mgmt_pending_cmd *cmd;
2812 
2813 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2814 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2815 			continue;
2816 
2817 		if (cmd->user_data != conn)
2818 			continue;
2819 
2820 		return cmd;
2821 	}
2822 
2823 	return NULL;
2824 }
2825 
/* Finish a PAIR_DEVICE command: send the reply, detach the pairing
 * callbacks from the connection and release the references taken when
 * pairing was started.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	/* Drop the hold taken by pair_device() when connecting */
	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Release the reference taken via hci_conn_get() */
	hci_conn_put(conn);

	return err;
}
2854 
2855 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2856 {
2857 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2858 	struct mgmt_pending_cmd *cmd;
2859 
2860 	cmd = find_pairing(conn);
2861 	if (cmd) {
2862 		cmd->cmd_complete(cmd, status);
2863 		mgmt_pending_remove(cmd);
2864 	}
2865 }
2866 
/* Connection callback used for BR/EDR pairing: any connect/security/
 * disconnect event resolves the pending PAIR_DEVICE command.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2882 
/* Connection callback used for LE pairing. Unlike the BR/EDR variant,
 * success is ignored here — merely connecting does not prove the
 * pairing finished; mgmt_smp_complete() handles the success case.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2901 
2902 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2903 		       u16 len)
2904 {
2905 	struct mgmt_cp_pair_device *cp = data;
2906 	struct mgmt_rp_pair_device rp;
2907 	struct mgmt_pending_cmd *cmd;
2908 	u8 sec_level, auth_type;
2909 	struct hci_conn *conn;
2910 	int err;
2911 
2912 	bt_dev_dbg(hdev, "sock %p", sk);
2913 
2914 	memset(&rp, 0, sizeof(rp));
2915 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2916 	rp.addr.type = cp->addr.type;
2917 
2918 	if (!bdaddr_type_is_valid(cp->addr.type))
2919 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2920 					 MGMT_STATUS_INVALID_PARAMS,
2921 					 &rp, sizeof(rp));
2922 
2923 	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2924 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2925 					 MGMT_STATUS_INVALID_PARAMS,
2926 					 &rp, sizeof(rp));
2927 
2928 	hci_dev_lock(hdev);
2929 
2930 	if (!hdev_is_powered(hdev)) {
2931 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2932 					MGMT_STATUS_NOT_POWERED, &rp,
2933 					sizeof(rp));
2934 		goto unlock;
2935 	}
2936 
2937 	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2938 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2939 					MGMT_STATUS_ALREADY_PAIRED, &rp,
2940 					sizeof(rp));
2941 		goto unlock;
2942 	}
2943 
2944 	sec_level = BT_SECURITY_MEDIUM;
2945 	auth_type = HCI_AT_DEDICATED_BONDING;
2946 
2947 	if (cp->addr.type == BDADDR_BREDR) {
2948 		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2949 				       auth_type, CONN_REASON_PAIR_DEVICE);
2950 	} else {
2951 		u8 addr_type = le_addr_type(cp->addr.type);
2952 		struct hci_conn_params *p;
2953 
2954 		/* When pairing a new device, it is expected to remember
2955 		 * this device for future connections. Adding the connection
2956 		 * parameter information ahead of time allows tracking
2957 		 * of the slave preferred values and will speed up any
2958 		 * further connection establishment.
2959 		 *
2960 		 * If connection parameters already exist, then they
2961 		 * will be kept and this function does nothing.
2962 		 */
2963 		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
2964 
2965 		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2966 			p->auto_connect = HCI_AUTO_CONN_DISABLED;
2967 
2968 		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
2969 					   sec_level, HCI_LE_CONN_TIMEOUT,
2970 					   CONN_REASON_PAIR_DEVICE);
2971 	}
2972 
2973 	if (IS_ERR(conn)) {
2974 		int status;
2975 
2976 		if (PTR_ERR(conn) == -EBUSY)
2977 			status = MGMT_STATUS_BUSY;
2978 		else if (PTR_ERR(conn) == -EOPNOTSUPP)
2979 			status = MGMT_STATUS_NOT_SUPPORTED;
2980 		else if (PTR_ERR(conn) == -ECONNREFUSED)
2981 			status = MGMT_STATUS_REJECTED;
2982 		else
2983 			status = MGMT_STATUS_CONNECT_FAILED;
2984 
2985 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2986 					status, &rp, sizeof(rp));
2987 		goto unlock;
2988 	}
2989 
2990 	if (conn->connect_cfm_cb) {
2991 		hci_conn_drop(conn);
2992 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2993 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
2994 		goto unlock;
2995 	}
2996 
2997 	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2998 	if (!cmd) {
2999 		err = -ENOMEM;
3000 		hci_conn_drop(conn);
3001 		goto unlock;
3002 	}
3003 
3004 	cmd->cmd_complete = pairing_complete;
3005 
3006 	/* For LE, just connecting isn't a proof that the pairing finished */
3007 	if (cp->addr.type == BDADDR_BREDR) {
3008 		conn->connect_cfm_cb = pairing_complete_cb;
3009 		conn->security_cfm_cb = pairing_complete_cb;
3010 		conn->disconn_cfm_cb = pairing_complete_cb;
3011 	} else {
3012 		conn->connect_cfm_cb = le_pairing_complete_cb;
3013 		conn->security_cfm_cb = le_pairing_complete_cb;
3014 		conn->disconn_cfm_cb = le_pairing_complete_cb;
3015 	}
3016 
3017 	conn->io_capability = cp->io_cap;
3018 	cmd->user_data = hci_conn_get(conn);
3019 
3020 	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3021 	    hci_conn_security(conn, sec_level, auth_type, true)) {
3022 		cmd->cmd_complete(cmd, 0);
3023 		mgmt_pending_remove(cmd);
3024 	}
3025 
3026 	err = 0;
3027 
3028 unlock:
3029 	hci_dev_unlock(hdev);
3030 	return err;
3031 }
3032 
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort the pending PAIR_DEVICE
 * command for the given address, remove any keys created so far and
 * tear down the link if it was created for the pairing.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The given address must match the pairing being cancelled */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3089 
/* Common helper for the user-interaction pairing replies (PIN negative
 * reply, user confirm, passkey and their negative variants).
 *
 * LE pairing responses are routed through SMP and answered
 * immediately; BR/EDR responses are forwarded to the controller as the
 * HCI command hci_op and the mgmt reply is deferred via a pending
 * command.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE: hand the response to SMP and reply right away */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3160 
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper around
 * user_pairing_resp().
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3172 
/* MGMT_OP_USER_CONFIRM_REPLY handler: validate the fixed-size payload
 * and forward to user_pairing_resp().
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3188 
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: thin wrapper around
 * user_pairing_resp().
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3200 
/* MGMT_OP_USER_PASSKEY_REPLY handler: thin wrapper around
 * user_pairing_resp(), passing the user-entered passkey through.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3212 
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: thin wrapper around
 * user_pairing_resp().
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3224 
/* Expire the current advertising instance if it carries any of the
 * given data flags (e.g. local name or appearance) and schedule the
 * next instance, so the updated data gets re-advertised.
 */
static void adv_expire(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;
	struct hci_request req;
	int err;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return;

	cancel_adv_timeout(hdev);

	/* Advance to the next instance; NOTE(review): presumably this
	 * wraps around so a single instance re-selects itself — confirm
	 * against hci_get_next_instance() in hci_core.
	 */
	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return;

	hci_req_init(&req, hdev);
	err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
					      true);
	if (err)
		return;

	hci_req_run(&req, NULL);
}
3253 
/* HCI request completion callback for MGMT_OP_SET_LOCAL_NAME. Sends
 * the command status/complete to the issuing socket and, on success,
 * refreshes any advertising instance that includes the local name.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			        mgmt_status(status));
	} else {
		/* Echo the requested name back in the command complete */
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		/* If advertising is active, rotate instances carrying
		 * the local name so the new name is picked up.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3285 
/* Set the local device name and short name. When the controller is
 * powered the name is written to the controller (plus EIR and scan
 * response data) via an async HCI request completed in
 * set_name_complete(); otherwise the change is stored and signalled
 * immediately.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* Powered off: store the name and report the change right away,
	 * no HCI traffic is possible or needed.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		__hci_req_update_name(&req);
		__hci_req_update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		__hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3355 
3356 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3357 			  u16 len)
3358 {
3359 	struct mgmt_cp_set_appearance *cp = data;
3360 	u16 appearance;
3361 	int err;
3362 
3363 	bt_dev_dbg(hdev, "sock %p", sk);
3364 
3365 	if (!lmp_le_capable(hdev))
3366 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3367 				       MGMT_STATUS_NOT_SUPPORTED);
3368 
3369 	appearance = le16_to_cpu(cp->appearance);
3370 
3371 	hci_dev_lock(hdev);
3372 
3373 	if (hdev->appearance != appearance) {
3374 		hdev->appearance = appearance;
3375 
3376 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3377 			adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3378 
3379 		ext_info_changed(hdev, sk);
3380 	}
3381 
3382 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3383 				0);
3384 
3385 	hci_dev_unlock(hdev);
3386 
3387 	return err;
3388 }
3389 
3390 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3391 				 void *data, u16 len)
3392 {
3393 	struct mgmt_rp_get_phy_configuration rp;
3394 
3395 	bt_dev_dbg(hdev, "sock %p", sk);
3396 
3397 	hci_dev_lock(hdev);
3398 
3399 	memset(&rp, 0, sizeof(rp));
3400 
3401 	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3402 	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3403 	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3404 
3405 	hci_dev_unlock(hdev);
3406 
3407 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3408 				 &rp, sizeof(rp));
3409 }
3410 
3411 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3412 {
3413 	struct mgmt_ev_phy_configuration_changed ev;
3414 
3415 	memset(&ev, 0, sizeof(ev));
3416 
3417 	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3418 
3419 	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3420 			  sizeof(ev), skip);
3421 }
3422 
/* HCI request completion callback for MGMT_OP_SET_PHY_CONFIGURATION.
 * Reports the result to the issuing socket and, on success, broadcasts
 * the PHY Configuration Changed event to all other sockets.
 */
static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode, struct sk_buff *skb)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION,
				mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		/* cmd->sk is skipped; it already got the reply above */
		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3453 
/* Change the selected PHY configuration. BR/EDR selections are mapped
 * onto the ACL packet-type bitmap stored in hdev->pkt_type; LE
 * selections are programmed into the controller with the HCI LE Set
 * Default PHY command, completed in set_default_phy_complete().
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct hci_cp_le_set_default_phy cp_phy;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Reject any selection outside the supported set */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	/* Non-configurable PHYs must all remain selected */
	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Map the BR/EDR selections onto ACL packet types. The DM/DH
	 * bits are set when the slot count is selected, while the
	 * 2DHx/3DHx bits are set when the corresponding EDR PHY is
	 * de-selected (inverted polarity in the packet-type bitmap).
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If the LE part of the selection is unchanged, only BR/EDR
	 * packet types were updated and no HCI command is required.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&cp_phy, 0, sizeof(cp_phy));

	/* When no LE TX/RX PHYs are selected, the all_phys bits signal
	 * no preference for that direction (presumably per the HCI
	 * spec for LE Set Default PHY — confirm against the spec).
	 */
	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);

	err = hci_req_run_skb(&req, set_default_phy_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3608 
3609 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3610 			    u16 len)
3611 {
3612 	int err = MGMT_STATUS_SUCCESS;
3613 	struct mgmt_cp_set_blocked_keys *keys = data;
3614 	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3615 				   sizeof(struct mgmt_blocked_key_info));
3616 	u16 key_count, expected_len;
3617 	int i;
3618 
3619 	bt_dev_dbg(hdev, "sock %p", sk);
3620 
3621 	key_count = __le16_to_cpu(keys->key_count);
3622 	if (key_count > max_key_count) {
3623 		bt_dev_err(hdev, "too big key_count value %u", key_count);
3624 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3625 				       MGMT_STATUS_INVALID_PARAMS);
3626 	}
3627 
3628 	expected_len = struct_size(keys, keys, key_count);
3629 	if (expected_len != len) {
3630 		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3631 			   expected_len, len);
3632 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3633 				       MGMT_STATUS_INVALID_PARAMS);
3634 	}
3635 
3636 	hci_dev_lock(hdev);
3637 
3638 	hci_blocked_keys_clear(hdev);
3639 
3640 	for (i = 0; i < keys->key_count; ++i) {
3641 		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3642 
3643 		if (!b) {
3644 			err = MGMT_STATUS_NO_RESOURCES;
3645 			break;
3646 		}
3647 
3648 		b->type = keys->keys[i].type;
3649 		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3650 		list_add_rcu(&b->list, &hdev->blocked_keys);
3651 	}
3652 	hci_dev_unlock(hdev);
3653 
3654 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3655 				err, NULL, 0);
3656 }
3657 
/* Enable or disable the wideband speech setting. The setting can only
 * be changed while the controller is powered off; when powered, a
 * request that would change the current state is rejected.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Only boolean on/off is valid */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* While powered, only a request that matches the current state
	 * is accepted (and answered with the settings response below).
	 */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* test_and_set/clear return the previous state, so "changed"
	 * reflects whether the flag actually flipped.
	 */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3713 
/* Read Controller Capabilities: report the security capability flags,
 * the maximum encryption key sizes and, when available, the LE TX power
 * range, encoded as a sequence of EIR-style TLV entries in rp->cap.
 *
 * NOTE(review): buf must hold sizeof(*rp) plus all appended entries;
 * re-check the 20-byte budget if new capability entries are added.
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 */
	if (hdev->commands[41] & 0x08)
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		/* Single-byte copies preserve the signed power values
		 * without integer conversion.
		 */
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
3777 
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
/* Bytes are the reverse of the string form above */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif
3785 
/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
/* Bytes are the reverse of the string form above */
static const u8 simult_central_periph_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};
3791 
/* 15c0a148-c273-11ea-b3de-0242ac130004 */
/* Bytes are the reverse of the string form above */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};
3797 
/* Report the list of experimental features with their current flags.
 * Called with hdev == NULL for the non-controller (global) index.
 *
 * NOTE(review): buf is sized for exactly 3 feature entries (header +
 * 3 * 20 bytes); it must grow if more features are added below.
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	char buf[62];	/* Enough space for 3 features */
	struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
	u16 idx = 0;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* Debug feature is only reported on the non-controller index */
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev) {
		if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
		    (hdev->le_states[4] & 0x08) &&	/* Central */
		    (hdev->le_states[4] & 0x40) &&	/* Peripheral */
		    (hdev->le_states[3] & 0x10))	/* Simultaneous */
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && use_ll_privacy(hdev)) {
		/* BIT(0): enabled, BIT(1): toggling changes the
		 * supported settings (see exp_ll_privacy_feature_changed)
		 */
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_READ_EXP_FEATURES_INFO,
				 0, rp, sizeof(*rp) + (20 * idx));
}
3856 
3857 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
3858 					  struct sock *skip)
3859 {
3860 	struct mgmt_ev_exp_feature_changed ev;
3861 
3862 	memset(&ev, 0, sizeof(ev));
3863 	memcpy(ev.uuid, rpa_resolution_uuid, 16);
3864 	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
3865 
3866 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
3867 				  &ev, sizeof(ev),
3868 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3869 
3870 }
3871 
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Notify subscribed sockets that the debug experimental feature was
 * toggled. Sent on the non-controller index (hdev == NULL).
 */
static int exp_debug_feature_changed(bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev = {
		.flags = cpu_to_le32(enabled ? BIT(0) : 0),
	};

	memcpy(ev.uuid, debug_uuid, 16);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
#endif
3886 
/* Enable or disable an experimental feature identified by UUID. The
 * all-zero UUID is special and switches every experimental feature off.
 */
static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_set_exp_feature *cp = data;
	struct mgmt_rp_set_exp_feature rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Zero UUID: disable all experimental features */
	if (!memcmp(cp->uuid, ZERO_KEY, 16)) {
		memset(rp.uuid, 0, 16);
		rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
		if (!hdev) {
			bool changed = bt_dbg_get();

			bt_dbg_set(false);

			if (changed)
				exp_debug_feature_changed(false, sk);
		}
#endif

		/* LL privacy can only be changed while powered down */
		if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
			bool changed = hci_dev_test_flag(hdev,
							 HCI_ENABLE_LL_PRIVACY);

			hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);

			if (changed)
				exp_ll_privacy_feature_changed(false, hdev, sk);
		}

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
					 MGMT_OP_SET_EXP_FEATURE, 0,
					 &rp, sizeof(rp));
	}

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!memcmp(cp->uuid, debug_uuid, 16)) {
		bool val, changed;
		int err;

		/* Command requires to use the non-controller index */
		if (hdev)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_INDEX);

		/* Parameters are limited to a single octet */
		if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Only boolean on/off is supported */
		if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		val = !!cp->param[0];
		changed = val ? !bt_dbg_get() : bt_dbg_get();
		bt_dbg_set(val);

		memcpy(rp.uuid, debug_uuid, 16);
		rp.flags = cpu_to_le32(val ? BIT(0) : 0);

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
					MGMT_OP_SET_EXP_FEATURE, 0,
					&rp, sizeof(rp));

		if (changed)
			exp_debug_feature_changed(val, sk);

		return err;
	}
#endif

	if (!memcmp(cp->uuid, rpa_resolution_uuid, 16)) {
		bool val, changed;
		int err;
		u32 flags;

		/* Command requires to use the controller index */
		if (!hdev)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_INDEX);

		/* Changes can only be made when controller is powered down */
		if (hdev_is_powered(hdev))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_NOT_POWERED);

		/* Parameters are limited to a single octet */
		if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Only boolean on/off is supported */
		if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		val = !!cp->param[0];

		if (val) {
			changed = !hci_dev_test_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
			hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);

			/* Enable LL privacy + supported settings changed */
			flags = BIT(0) | BIT(1);
		} else {
			changed = hci_dev_test_flag(hdev,
						    HCI_ENABLE_LL_PRIVACY);
			hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);

			/* Disable LL privacy + supported settings changed */
			flags = BIT(1);
		}

		memcpy(rp.uuid, rpa_resolution_uuid, 16);
		rp.flags = cpu_to_le32(flags);

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_EXP_FEATURE, 0,
					&rp, sizeof(rp));

		if (changed)
			exp_ll_privacy_feature_changed(val, hdev, sk);

		return err;
	}

	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
}
4037 
4038 #define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
4039 
4040 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4041 			    u16 data_len)
4042 {
4043 	struct mgmt_cp_get_device_flags *cp = data;
4044 	struct mgmt_rp_get_device_flags rp;
4045 	struct bdaddr_list_with_flags *br_params;
4046 	struct hci_conn_params *params;
4047 	u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4048 	u32 current_flags = 0;
4049 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4050 
4051 	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4052 		   &cp->addr.bdaddr, cp->addr.type);
4053 
4054 	hci_dev_lock(hdev);
4055 
4056 	if (cp->addr.type == BDADDR_BREDR) {
4057 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
4058 							      &cp->addr.bdaddr,
4059 							      cp->addr.type);
4060 		if (!br_params)
4061 			goto done;
4062 
4063 		current_flags = br_params->current_flags;
4064 	} else {
4065 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4066 						le_addr_type(cp->addr.type));
4067 
4068 		if (!params)
4069 			goto done;
4070 
4071 		current_flags = params->current_flags;
4072 	}
4073 
4074 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4075 	rp.addr.type = cp->addr.type;
4076 	rp.supported_flags = cpu_to_le32(supported_flags);
4077 	rp.current_flags = cpu_to_le32(current_flags);
4078 
4079 	status = MGMT_STATUS_SUCCESS;
4080 
4081 done:
4082 	hci_dev_unlock(hdev);
4083 
4084 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
4085 				&rp, sizeof(rp));
4086 }
4087 
4088 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4089 				 bdaddr_t *bdaddr, u8 bdaddr_type,
4090 				 u32 supported_flags, u32 current_flags)
4091 {
4092 	struct mgmt_ev_device_flags_changed ev;
4093 
4094 	bacpy(&ev.addr.bdaddr, bdaddr);
4095 	ev.addr.type = bdaddr_type;
4096 	ev.supported_flags = cpu_to_le32(supported_flags);
4097 	ev.current_flags = cpu_to_le32(current_flags);
4098 
4099 	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
4100 }
4101 
4102 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4103 			    u16 len)
4104 {
4105 	struct mgmt_cp_set_device_flags *cp = data;
4106 	struct bdaddr_list_with_flags *br_params;
4107 	struct hci_conn_params *params;
4108 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4109 	u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4110 	u32 current_flags = __le32_to_cpu(cp->current_flags);
4111 
4112 	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4113 		   &cp->addr.bdaddr, cp->addr.type,
4114 		   __le32_to_cpu(current_flags));
4115 
4116 	if ((supported_flags | current_flags) != supported_flags) {
4117 		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4118 			    current_flags, supported_flags);
4119 		goto done;
4120 	}
4121 
4122 	hci_dev_lock(hdev);
4123 
4124 	if (cp->addr.type == BDADDR_BREDR) {
4125 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
4126 							      &cp->addr.bdaddr,
4127 							      cp->addr.type);
4128 
4129 		if (br_params) {
4130 			br_params->current_flags = current_flags;
4131 			status = MGMT_STATUS_SUCCESS;
4132 		} else {
4133 			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4134 				    &cp->addr.bdaddr, cp->addr.type);
4135 		}
4136 	} else {
4137 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4138 						le_addr_type(cp->addr.type));
4139 		if (params) {
4140 			params->current_flags = current_flags;
4141 			status = MGMT_STATUS_SUCCESS;
4142 		} else {
4143 			bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4144 				    &cp->addr.bdaddr,
4145 				    le_addr_type(cp->addr.type));
4146 		}
4147 	}
4148 
4149 done:
4150 	hci_dev_unlock(hdev);
4151 
4152 	if (status == MGMT_STATUS_SUCCESS)
4153 		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4154 				     supported_flags, current_flags);
4155 
4156 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4157 				 &cp->addr, sizeof(cp->addr));
4158 }
4159 
4160 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4161 				   u16 handle)
4162 {
4163 	struct mgmt_ev_adv_monitor_added ev;
4164 
4165 	ev.monitor_handle = cpu_to_le16(handle);
4166 
4167 	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
4168 }
4169 
/* Emit the Advertisement Monitor Removed event. When the removal was
 * triggered by a pending Remove Advertisement Monitor command with a
 * non-zero handle, the issuing socket is skipped since it receives a
 * command reply instead. NOTE(review): a zero monitor_handle is treated
 * as not skipping — presumably the remove-all case; confirm against
 * remove_adv_monitor().
 */
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk_skip = NULL;
	struct mgmt_cp_remove_adv_monitor *cp;

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cmd) {
		cp = cmd->param;

		if (cp->monitor_handle)
			sk_skip = cmd->sk;
	}

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
}
4189 
/* Read Advertisement Monitor Features: report supported/enabled monitor
 * features plus the handles of all currently registered monitors.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	/* NOTE(review): assumes the IDR never holds more than
	 * HCI_MAX_ADV_MONITOR_NUM_HANDLES entries, otherwise handles[]
	 * would overflow — confirm registration enforces the limit.
	 */
	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
4238 
4239 int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
4240 {
4241 	struct mgmt_rp_add_adv_patterns_monitor rp;
4242 	struct mgmt_pending_cmd *cmd;
4243 	struct adv_monitor *monitor;
4244 	int err = 0;
4245 
4246 	hci_dev_lock(hdev);
4247 
4248 	cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev);
4249 	if (!cmd) {
4250 		cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev);
4251 		if (!cmd)
4252 			goto done;
4253 	}
4254 
4255 	monitor = cmd->user_data;
4256 	rp.monitor_handle = cpu_to_le16(monitor->handle);
4257 
4258 	if (!status) {
4259 		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
4260 		hdev->adv_monitors_cnt++;
4261 		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
4262 			monitor->state = ADV_MONITOR_STATE_REGISTERED;
4263 		hci_update_background_scan(hdev);
4264 	}
4265 
4266 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4267 				mgmt_status(status), &rp, sizeof(rp));
4268 	mgmt_pending_remove(cmd);
4269 
4270 done:
4271 	hci_dev_unlock(hdev);
4272 	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
4273 		   rp.monitor_handle, status);
4274 
4275 	return err;
4276 }
4277 
/* Shared tail of the Add Adv Patterns Monitor command handlers.
 *
 * Takes ownership of monitor @m: on every error path it is freed via
 * hci_free_adv_monitor().  @status carries a MGMT_STATUS_* code from
 * request parsing; a non-zero value rejects the request immediately.
 * When the controller must be programmed first, the reply is deferred
 * to mgmt_add_adv_patterns_monitor_complete().
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd;
	int err;
	bool pending;

	hci_dev_lock(hdev);

	/* Parsing already failed; reply with the given status */
	if (status)
		goto unlock;

	/* Serialize against other monitor and LE state operations */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	/* pending == true means the controller must confirm first */
	pending = hci_add_adv_monitor(hdev, m, &err);
	if (err) {
		if (err == -ENOSPC || err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else if (err == -EINVAL)
			status = MGMT_STATUS_INVALID_PARAMS;
		else
			status = MGMT_STATUS_FAILED;

		mgmt_pending_remove(cmd);
		goto unlock;
	}

	if (!pending) {
		/* Monitor was registered without controller interaction;
		 * complete the command right away.
		 */
		mgmt_pending_remove(cmd);
		rp.monitor_handle = cpu_to_le16(m->handle);
		mgmt_adv_monitor_added(sk, hdev, m->handle);
		m->state = ADV_MONITOR_STATE_REGISTERED;
		hdev->adv_monitors_cnt++;

		hci_dev_unlock(hdev);
		return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS,
					 &rp, sizeof(rp));
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
4341 
4342 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4343 				   struct mgmt_adv_rssi_thresholds *rssi)
4344 {
4345 	if (rssi) {
4346 		m->rssi.low_threshold = rssi->low_threshold;
4347 		m->rssi.low_threshold_timeout =
4348 		    __le16_to_cpu(rssi->low_threshold_timeout);
4349 		m->rssi.high_threshold = rssi->high_threshold;
4350 		m->rssi.high_threshold_timeout =
4351 		    __le16_to_cpu(rssi->high_threshold_timeout);
4352 		m->rssi.sampling_period = rssi->sampling_period;
4353 	} else {
4354 		/* Default values. These numbers are the least constricting
4355 		 * parameters for MSFT API to work, so it behaves as if there
4356 		 * are no rssi parameter to consider. May need to be changed
4357 		 * if other API are to be supported.
4358 		 */
4359 		m->rssi.low_threshold = -127;
4360 		m->rssi.low_threshold_timeout = 60;
4361 		m->rssi.high_threshold = -127;
4362 		m->rssi.high_threshold_timeout = 0;
4363 		m->rssi.sampling_period = 0;
4364 	}
4365 }
4366 
4367 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
4368 				    struct mgmt_adv_pattern *patterns)
4369 {
4370 	u8 offset = 0, length = 0;
4371 	struct adv_pattern *p = NULL;
4372 	int i;
4373 
4374 	for (i = 0; i < pattern_count; i++) {
4375 		offset = patterns[i].offset;
4376 		length = patterns[i].length;
4377 		if (offset >= HCI_MAX_AD_LENGTH ||
4378 		    length > HCI_MAX_AD_LENGTH ||
4379 		    (offset + length) > HCI_MAX_AD_LENGTH)
4380 			return MGMT_STATUS_INVALID_PARAMS;
4381 
4382 		p = kmalloc(sizeof(*p), GFP_KERNEL);
4383 		if (!p)
4384 			return MGMT_STATUS_NO_RESOURCES;
4385 
4386 		p->ad_type = patterns[i].ad_type;
4387 		p->offset = patterns[i].offset;
4388 		p->length = patterns[i].length;
4389 		memcpy(p->value, patterns[i].value, p->length);
4390 
4391 		INIT_LIST_HEAD(&p->list);
4392 		list_add(&p->list, &m->patterns);
4393 	}
4394 
4395 	return MGMT_STATUS_SUCCESS;
4396 }
4397 
4398 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4399 				    void *data, u16 len)
4400 {
4401 	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4402 	struct adv_monitor *m = NULL;
4403 	u8 status = MGMT_STATUS_SUCCESS;
4404 	size_t expected_size = sizeof(*cp);
4405 
4406 	BT_DBG("request for %s", hdev->name);
4407 
4408 	if (len <= sizeof(*cp)) {
4409 		status = MGMT_STATUS_INVALID_PARAMS;
4410 		goto done;
4411 	}
4412 
4413 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4414 	if (len != expected_size) {
4415 		status = MGMT_STATUS_INVALID_PARAMS;
4416 		goto done;
4417 	}
4418 
4419 	m = kzalloc(sizeof(*m), GFP_KERNEL);
4420 	if (!m) {
4421 		status = MGMT_STATUS_NO_RESOURCES;
4422 		goto done;
4423 	}
4424 
4425 	INIT_LIST_HEAD(&m->patterns);
4426 
4427 	parse_adv_monitor_rssi(m, NULL);
4428 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4429 
4430 done:
4431 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4432 					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
4433 }
4434 
4435 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
4436 					 void *data, u16 len)
4437 {
4438 	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
4439 	struct adv_monitor *m = NULL;
4440 	u8 status = MGMT_STATUS_SUCCESS;
4441 	size_t expected_size = sizeof(*cp);
4442 
4443 	BT_DBG("request for %s", hdev->name);
4444 
4445 	if (len <= sizeof(*cp)) {
4446 		status = MGMT_STATUS_INVALID_PARAMS;
4447 		goto done;
4448 	}
4449 
4450 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4451 	if (len != expected_size) {
4452 		status = MGMT_STATUS_INVALID_PARAMS;
4453 		goto done;
4454 	}
4455 
4456 	m = kzalloc(sizeof(*m), GFP_KERNEL);
4457 	if (!m) {
4458 		status = MGMT_STATUS_NO_RESOURCES;
4459 		goto done;
4460 	}
4461 
4462 	INIT_LIST_HEAD(&m->patterns);
4463 
4464 	parse_adv_monitor_rssi(m, &cp->rssi);
4465 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4466 
4467 done:
4468 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4469 					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
4470 }
4471 
4472 int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
4473 {
4474 	struct mgmt_rp_remove_adv_monitor rp;
4475 	struct mgmt_cp_remove_adv_monitor *cp;
4476 	struct mgmt_pending_cmd *cmd;
4477 	int err = 0;
4478 
4479 	hci_dev_lock(hdev);
4480 
4481 	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4482 	if (!cmd)
4483 		goto done;
4484 
4485 	cp = cmd->param;
4486 	rp.monitor_handle = cp->monitor_handle;
4487 
4488 	if (!status)
4489 		hci_update_background_scan(hdev);
4490 
4491 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4492 				mgmt_status(status), &rp, sizeof(rp));
4493 	mgmt_pending_remove(cmd);
4494 
4495 done:
4496 	hci_dev_unlock(hdev);
4497 	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
4498 		   rp.monitor_handle, status);
4499 
4500 	return err;
4501 }
4502 
/* MGMT_OP_REMOVE_ADV_MONITOR handler.  Removes a single advertisement
 * monitor (handle != 0) or all monitors (handle == 0).  When removal
 * requires controller interaction the reply is deferred to
 * mgmt_remove_adv_monitor_complete().
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_remove_adv_monitor *cp = data;
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd;
	u16 handle = __le16_to_cpu(cp->monitor_handle);
	int err, status;
	bool pending;

	BT_DBG("request for %s", hdev->name);
	rp.monitor_handle = cp->monitor_handle;

	hci_dev_lock(hdev);

	/* Serialize against other monitor and LE state operations */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	/* Handle 0 is a wildcard meaning "remove all monitors" */
	if (handle)
		pending = hci_remove_single_adv_monitor(hdev, handle, &err);
	else
		pending = hci_remove_all_adv_monitor(hdev, &err);

	if (err) {
		mgmt_pending_remove(cmd);

		if (err == -ENOENT)
			status = MGMT_STATUS_INVALID_INDEX;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	/* monitor can be removed without forwarding request to controller */
	if (!pending) {
		mgmt_pending_remove(cmd);
		hci_dev_unlock(hdev);

		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_ADV_MONITOR,
					 MGMT_STATUS_SUCCESS,
					 &rp, sizeof(rp));
	}

	hci_dev_unlock(hdev);
	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
4567 
/* Completion callback for the Read Local OOB Data HCI request.  Parses
 * either the legacy or the extended (secure-connections) controller
 * reply and forwards the hashes/randomizers to the waiting command.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
				         u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		/* Guard against a truncated controller reply */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Legacy reply has no P-256 data; trim it from the response */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		/* Guard against a truncated controller reply */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}
4626 
/* MGMT_OP_READ_LOCAL_OOB_DATA handler.  Issues the appropriate HCI
 * read (extended when BR/EDR Secure Connections is enabled) and defers
 * the reply to read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB data only exists when the controller supports SSP */
	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one read may be outstanding at a time */
	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* Extended variant additionally returns the P-256 hash/randomizer */
	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
	else
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_data_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4677 
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler.  Accepts either the legacy
 * (P-192 only) or the extended (P-192 + P-256) request format, which
 * are distinguished purely by the request length.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* Legacy format carries P-192 values for BR/EDR only */
		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		/* Neither known request size matched */
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4785 
4786 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4787 				  void *data, u16 len)
4788 {
4789 	struct mgmt_cp_remove_remote_oob_data *cp = data;
4790 	u8 status;
4791 	int err;
4792 
4793 	bt_dev_dbg(hdev, "sock %p", sk);
4794 
4795 	if (cp->addr.type != BDADDR_BREDR)
4796 		return mgmt_cmd_complete(sk, hdev->id,
4797 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4798 					 MGMT_STATUS_INVALID_PARAMS,
4799 					 &cp->addr, sizeof(cp->addr));
4800 
4801 	hci_dev_lock(hdev);
4802 
4803 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4804 		hci_remote_oob_data_clear(hdev);
4805 		status = MGMT_STATUS_SUCCESS;
4806 		goto done;
4807 	}
4808 
4809 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
4810 	if (err < 0)
4811 		status = MGMT_STATUS_INVALID_PARAMS;
4812 	else
4813 		status = MGMT_STATUS_SUCCESS;
4814 
4815 done:
4816 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4817 				status, &cp->addr, sizeof(cp->addr));
4818 
4819 	hci_dev_unlock(hdev);
4820 	return err;
4821 }
4822 
4823 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
4824 {
4825 	struct mgmt_pending_cmd *cmd;
4826 
4827 	bt_dev_dbg(hdev, "status %d", status);
4828 
4829 	hci_dev_lock(hdev);
4830 
4831 	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
4832 	if (!cmd)
4833 		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
4834 
4835 	if (!cmd)
4836 		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
4837 
4838 	if (cmd) {
4839 		cmd->cmd_complete(cmd, mgmt_status(status));
4840 		mgmt_pending_remove(cmd);
4841 	}
4842 
4843 	hci_dev_unlock(hdev);
4844 
4845 	/* Handle suspend notifier */
4846 	if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
4847 			       hdev->suspend_tasks)) {
4848 		bt_dev_dbg(hdev, "Unpaused discovery");
4849 		wake_up(&hdev->suspend_wait_q);
4850 	}
4851 }
4852 
4853 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
4854 				    uint8_t *mgmt_status)
4855 {
4856 	switch (type) {
4857 	case DISCOV_TYPE_LE:
4858 		*mgmt_status = mgmt_le_support(hdev);
4859 		if (*mgmt_status)
4860 			return false;
4861 		break;
4862 	case DISCOV_TYPE_INTERLEAVED:
4863 		*mgmt_status = mgmt_le_support(hdev);
4864 		if (*mgmt_status)
4865 			return false;
4866 		fallthrough;
4867 	case DISCOV_TYPE_BREDR:
4868 		*mgmt_status = mgmt_bredr_support(hdev);
4869 		if (*mgmt_status)
4870 			return false;
4871 		break;
4872 	default:
4873 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
4874 		return false;
4875 	}
4876 
4877 	return true;
4878 }
4879 
/* Shared implementation for the Start Discovery and Start Limited
 * Discovery commands.  Validates state, records the discovery settings
 * and kicks off the asynchronous discovery work; the command completes
 * later via mgmt_start_discovery_complete().
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject while discovery or periodic inquiry is already running */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* Actual discovery is started by the discov_update work item */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
4947 
/* MGMT_OP_START_DISCOVERY handler; thin wrapper around the shared
 * start_discovery_internal() logic.
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
4954 
/* MGMT_OP_START_LIMITED_DISCOVERY handler; thin wrapper around the
 * shared start_discovery_internal() logic.
 */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
4962 
/* cmd_complete callback for Start Service Discovery; echoes back only
 * the first parameter byte (the discovery type) of the original request.
 */
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
					  u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, 1);
}
4969 
/* MGMT_OP_START_SERVICE_DISCOVERY handler.  Like Start Discovery but
 * with result filtering by RSSI and an optional list of service UUIDs
 * appended to the request.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound on uuid_count so the length math below can't wrap */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject while discovery or periodic inquiry is already running */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* Each UUID in the trailing list is 16 bytes (128-bit) */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	/* Actual discovery is started by the discov_update work item */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
5078 
5079 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
5080 {
5081 	struct mgmt_pending_cmd *cmd;
5082 
5083 	bt_dev_dbg(hdev, "status %d", status);
5084 
5085 	hci_dev_lock(hdev);
5086 
5087 	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5088 	if (cmd) {
5089 		cmd->cmd_complete(cmd, mgmt_status(status));
5090 		mgmt_pending_remove(cmd);
5091 	}
5092 
5093 	hci_dev_unlock(hdev);
5094 
5095 	/* Handle suspend notifier */
5096 	if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
5097 		bt_dev_dbg(hdev, "Paused discovery");
5098 		wake_up(&hdev->suspend_wait_q);
5099 	}
5100 }
5101 
/* MGMT_OP_STOP_DISCOVERY handler.  The requested type must match the
 * type of the currently running discovery; actual stopping happens
 * asynchronously via the discov_update work item.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* Requested type must match the discovery that is running */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* Command completes via mgmt_stop_discovery_complete() */
	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5143 
5144 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
5145 			u16 len)
5146 {
5147 	struct mgmt_cp_confirm_name *cp = data;
5148 	struct inquiry_entry *e;
5149 	int err;
5150 
5151 	bt_dev_dbg(hdev, "sock %p", sk);
5152 
5153 	hci_dev_lock(hdev);
5154 
5155 	if (!hci_discovery_active(hdev)) {
5156 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5157 					MGMT_STATUS_FAILED, &cp->addr,
5158 					sizeof(cp->addr));
5159 		goto failed;
5160 	}
5161 
5162 	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
5163 	if (!e) {
5164 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5165 					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
5166 					sizeof(cp->addr));
5167 		goto failed;
5168 	}
5169 
5170 	if (cp->name_known) {
5171 		e->name_state = NAME_KNOWN;
5172 		list_del(&e->list);
5173 	} else {
5174 		e->name_state = NAME_NEEDED;
5175 		hci_inquiry_cache_update_resolve(hdev, e);
5176 	}
5177 
5178 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
5179 				&cp->addr, sizeof(cp->addr));
5180 
5181 failed:
5182 	hci_dev_unlock(hdev);
5183 	return err;
5184 }
5185 
5186 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
5187 			u16 len)
5188 {
5189 	struct mgmt_cp_block_device *cp = data;
5190 	u8 status;
5191 	int err;
5192 
5193 	bt_dev_dbg(hdev, "sock %p", sk);
5194 
5195 	if (!bdaddr_type_is_valid(cp->addr.type))
5196 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
5197 					 MGMT_STATUS_INVALID_PARAMS,
5198 					 &cp->addr, sizeof(cp->addr));
5199 
5200 	hci_dev_lock(hdev);
5201 
5202 	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
5203 				  cp->addr.type);
5204 	if (err < 0) {
5205 		status = MGMT_STATUS_FAILED;
5206 		goto done;
5207 	}
5208 
5209 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5210 		   sk);
5211 	status = MGMT_STATUS_SUCCESS;
5212 
5213 done:
5214 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
5215 				&cp->addr, sizeof(cp->addr));
5216 
5217 	hci_dev_unlock(hdev);
5218 
5219 	return err;
5220 }
5221 
5222 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5223 			  u16 len)
5224 {
5225 	struct mgmt_cp_unblock_device *cp = data;
5226 	u8 status;
5227 	int err;
5228 
5229 	bt_dev_dbg(hdev, "sock %p", sk);
5230 
5231 	if (!bdaddr_type_is_valid(cp->addr.type))
5232 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5233 					 MGMT_STATUS_INVALID_PARAMS,
5234 					 &cp->addr, sizeof(cp->addr));
5235 
5236 	hci_dev_lock(hdev);
5237 
5238 	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
5239 				  cp->addr.type);
5240 	if (err < 0) {
5241 		status = MGMT_STATUS_INVALID_PARAMS;
5242 		goto done;
5243 	}
5244 
5245 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5246 		   sk);
5247 	status = MGMT_STATUS_SUCCESS;
5248 
5249 done:
5250 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5251 				&cp->addr, sizeof(cp->addr));
5252 
5253 	hci_dev_unlock(hdev);
5254 
5255 	return err;
5256 }
5257 
/* MGMT_OP_SET_DEVICE_ID handler.  Stores the Device ID record and
 * refreshes the EIR data so the new record becomes visible over the
 * air.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;
	int err;
	__u16 source;

	bt_dev_dbg(hdev, "sock %p", sk);

	source = __le16_to_cpu(cp->source);

	/* Valid sources: 0 = unset, 1 = Bluetooth SIG, 2 = USB IF */
	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	/* Push the updated Device ID record into the EIR data */
	hci_req_init(&req, hdev);
	__hci_req_update_eir(&req);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);

	return err;
}
5292 
/* Request-complete callback used when re-enabling instance advertising;
 * only logs the outcome.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	bt_dev_dbg(hdev, "status %d", status);
}
5298 
/* Completion handler for Set Advertising.  Syncs the HCI_ADVERTISING
 * flag with the controller state, answers all pending Set Advertising
 * commands, wakes the suspend path if needed and, when Set Advertising
 * was just disabled, re-arms any configured advertising instances.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	u8 instance;
	struct adv_info *adv_instance;
	int err;

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set Advertising command */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the controller's actual advertising state in our flag */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
			       hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused advertising");
		wake_up(&hdev->suspend_wait_q);
	} else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
				      hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Unpaused advertising");
		wake_up(&hdev->suspend_wait_q);
	}

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		goto unlock;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		/* No current instance; fall back to the first configured one */
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			goto unlock;

		instance = adv_instance->instance;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, instance, true);

	if (!err)
		err = hci_req_run(&req, enable_advertising_instance);

	if (err)
		bt_dev_err(hdev, "failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}
5372 
/* Set Advertising command handler.
 *
 * cp->val selects the mode: 0x00 disables advertising, 0x01 enables it
 * and 0x02 enables connectable advertising. When HCI communication is
 * required the operation completes asynchronously through
 * set_advertising_complete(); otherwise the settings response is sent
 * directly.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	/* Enabling the experimental LL Privacy support disables support for
	 * advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* While advertising is paused (e.g. during suspend) refuse to
	 * change its state.
	 */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one Set Advertising or Set LE operation may be in flight */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	} else {
		__hci_req_disable_advertising(&req);
	}

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5491 
5492 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
5493 			      void *data, u16 len)
5494 {
5495 	struct mgmt_cp_set_static_address *cp = data;
5496 	int err;
5497 
5498 	bt_dev_dbg(hdev, "sock %p", sk);
5499 
5500 	if (!lmp_le_capable(hdev))
5501 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5502 				       MGMT_STATUS_NOT_SUPPORTED);
5503 
5504 	if (hdev_is_powered(hdev))
5505 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5506 				       MGMT_STATUS_REJECTED);
5507 
5508 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
5509 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
5510 			return mgmt_cmd_status(sk, hdev->id,
5511 					       MGMT_OP_SET_STATIC_ADDRESS,
5512 					       MGMT_STATUS_INVALID_PARAMS);
5513 
5514 		/* Two most significant bits shall be set */
5515 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
5516 			return mgmt_cmd_status(sk, hdev->id,
5517 					       MGMT_OP_SET_STATIC_ADDRESS,
5518 					       MGMT_STATUS_INVALID_PARAMS);
5519 	}
5520 
5521 	hci_dev_lock(hdev);
5522 
5523 	bacpy(&hdev->static_addr, &cp->bdaddr);
5524 
5525 	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
5526 	if (err < 0)
5527 		goto unlock;
5528 
5529 	err = new_settings(hdev, sk);
5530 
5531 unlock:
5532 	hci_dev_unlock(hdev);
5533 	return err;
5534 }
5535 
5536 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
5537 			   void *data, u16 len)
5538 {
5539 	struct mgmt_cp_set_scan_params *cp = data;
5540 	__u16 interval, window;
5541 	int err;
5542 
5543 	bt_dev_dbg(hdev, "sock %p", sk);
5544 
5545 	if (!lmp_le_capable(hdev))
5546 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5547 				       MGMT_STATUS_NOT_SUPPORTED);
5548 
5549 	interval = __le16_to_cpu(cp->interval);
5550 
5551 	if (interval < 0x0004 || interval > 0x4000)
5552 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5553 				       MGMT_STATUS_INVALID_PARAMS);
5554 
5555 	window = __le16_to_cpu(cp->window);
5556 
5557 	if (window < 0x0004 || window > 0x4000)
5558 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5559 				       MGMT_STATUS_INVALID_PARAMS);
5560 
5561 	if (window > interval)
5562 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5563 				       MGMT_STATUS_INVALID_PARAMS);
5564 
5565 	hci_dev_lock(hdev);
5566 
5567 	hdev->le_scan_interval = interval;
5568 	hdev->le_scan_window = window;
5569 
5570 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
5571 				NULL, 0);
5572 
5573 	/* If background scan is running, restart it so new parameters are
5574 	 * loaded.
5575 	 */
5576 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5577 	    hdev->discovery.state == DISCOVERY_STOPPED) {
5578 		struct hci_request req;
5579 
5580 		hci_req_init(&req, hdev);
5581 
5582 		hci_req_add_le_scan_disable(&req, false);
5583 		hci_req_add_le_passive_scan(&req);
5584 
5585 		hci_req_run(&req, NULL);
5586 	}
5587 
5588 	hci_dev_unlock(hdev);
5589 
5590 	return err;
5591 }
5592 
/* Completion callback for the HCI request issued by
 * set_fast_connectable(). On success the HCI_FAST_CONNECTABLE flag is
 * synced with the requested value and the settings response is sent;
 * on failure the translated HCI status is reported instead.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			        mgmt_status(status));
	} else {
		/* The requested mode was stored in the pending command */
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5626 
/* Set Fast Connectable command handler (BR/EDR only).
 *
 * When the controller is powered the page scan parameters are changed
 * through an HCI request that completes in fast_connectable_complete();
 * while powered off only the setting flag is toggled.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Requires BR/EDR enabled and at least Bluetooth 1.2 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to do if the requested state is already set */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* Powered off: just toggle the flag, no HCI traffic possible */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
5691 
5692 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5693 {
5694 	struct mgmt_pending_cmd *cmd;
5695 
5696 	bt_dev_dbg(hdev, "status 0x%02x", status);
5697 
5698 	hci_dev_lock(hdev);
5699 
5700 	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
5701 	if (!cmd)
5702 		goto unlock;
5703 
5704 	if (status) {
5705 		u8 mgmt_err = mgmt_status(status);
5706 
5707 		/* We need to restore the flag if related HCI commands
5708 		 * failed.
5709 		 */
5710 		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
5711 
5712 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
5713 	} else {
5714 		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
5715 		new_settings(hdev, cmd->sk);
5716 	}
5717 
5718 	mgmt_pending_remove(cmd);
5719 
5720 unlock:
5721 	hci_dev_unlock(hdev);
5722 }
5723 
/* Set BR/EDR command handler (dual-mode controllers only).
 *
 * Enables or disables the BR/EDR transport. Disabling while powered on
 * is rejected, as is re-enabling when a static address or secure
 * connections would make the configuration invalid (see the detailed
 * comment below). The powered-on enable path completes asynchronously
 * in set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Already in the requested state: just reply with the settings */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all BR/EDR-only settings */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, false);
	__hci_req_update_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	__hci_req_update_adv_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5835 
5836 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5837 {
5838 	struct mgmt_pending_cmd *cmd;
5839 	struct mgmt_mode *cp;
5840 
5841 	bt_dev_dbg(hdev, "status %u", status);
5842 
5843 	hci_dev_lock(hdev);
5844 
5845 	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
5846 	if (!cmd)
5847 		goto unlock;
5848 
5849 	if (status) {
5850 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
5851 			        mgmt_status(status));
5852 		goto remove;
5853 	}
5854 
5855 	cp = cmd->param;
5856 
5857 	switch (cp->val) {
5858 	case 0x00:
5859 		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
5860 		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5861 		break;
5862 	case 0x01:
5863 		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5864 		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
5865 		break;
5866 	case 0x02:
5867 		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
5868 		hci_dev_set_flag(hdev, HCI_SC_ONLY);
5869 		break;
5870 	}
5871 
5872 	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
5873 	new_settings(hdev, cmd->sk);
5874 
5875 remove:
5876 	mgmt_pending_remove(cmd);
5877 unlock:
5878 	hci_dev_unlock(hdev);
5879 }
5880 
/* Set Secure Connections command handler.
 *
 * cp->val: 0x00 disables secure connections, 0x01 enables them, 0x02
 * enables SC-only mode. If the controller needs to be told (powered,
 * SC-capable, BR/EDR enabled) a Write SC Support command is issued and
 * the operation completes in sc_enable_complete(); otherwise the flags
 * are toggled directly.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On an SC-capable BR/EDR controller, SC requires SSP first */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No HCI traffic needed/possible: toggle the flags directly and
	 * respond immediately.
	 */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Already in the requested state: just send the settings reply */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	/* Despite its name, the "failed" label is also the regular exit
	 * path when the request was submitted successfully.
	 */
failed:
	hci_dev_unlock(hdev);
	return err;
}
5968 
5969 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
5970 			  void *data, u16 len)
5971 {
5972 	struct mgmt_mode *cp = data;
5973 	bool changed, use_changed;
5974 	int err;
5975 
5976 	bt_dev_dbg(hdev, "sock %p", sk);
5977 
5978 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5979 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
5980 				       MGMT_STATUS_INVALID_PARAMS);
5981 
5982 	hci_dev_lock(hdev);
5983 
5984 	if (cp->val)
5985 		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
5986 	else
5987 		changed = hci_dev_test_and_clear_flag(hdev,
5988 						      HCI_KEEP_DEBUG_KEYS);
5989 
5990 	if (cp->val == 0x02)
5991 		use_changed = !hci_dev_test_and_set_flag(hdev,
5992 							 HCI_USE_DEBUG_KEYS);
5993 	else
5994 		use_changed = hci_dev_test_and_clear_flag(hdev,
5995 							  HCI_USE_DEBUG_KEYS);
5996 
5997 	if (hdev_is_powered(hdev) && use_changed &&
5998 	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
5999 		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6000 		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6001 			     sizeof(mode), &mode);
6002 	}
6003 
6004 	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6005 	if (err < 0)
6006 		goto unlock;
6007 
6008 	if (changed)
6009 		err = new_settings(hdev, sk);
6010 
6011 unlock:
6012 	hci_dev_unlock(hdev);
6013 	return err;
6014 }
6015 
/* Set Privacy command handler.
 *
 * cp->privacy: 0x00 disables privacy, 0x01 enables it, 0x02 enables
 * limited privacy. Stores the supplied IRK in hdev and updates the
 * privacy-related flags. Only allowed while powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Mark the RPA expired so a fresh one gets generated */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6072 
6073 static bool irk_is_valid(struct mgmt_irk_info *irk)
6074 {
6075 	switch (irk->addr.type) {
6076 	case BDADDR_LE_PUBLIC:
6077 		return true;
6078 
6079 	case BDADDR_LE_RANDOM:
6080 		/* Two most significant bits shall be set */
6081 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6082 			return false;
6083 		return true;
6084 	}
6085 
6086 	return false;
6087 }
6088 
/* Load IRKs command handler.
 *
 * Replaces the stored Identity Resolving Keys with the supplied list.
 * All entries are validated up front; blocked keys are skipped with a
 * warning rather than failing the whole command.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps the total payload within a u16 length */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate every entry before touching the stored keys */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	/* User space handling IRKs implies it can resolve RPAs */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6159 
6160 static bool ltk_is_valid(struct mgmt_ltk_info *key)
6161 {
6162 	if (key->master != 0x00 && key->master != 0x01)
6163 		return false;
6164 
6165 	switch (key->addr.type) {
6166 	case BDADDR_LE_PUBLIC:
6167 		return true;
6168 
6169 	case BDADDR_LE_RANDOM:
6170 		/* Two most significant bits shall be set */
6171 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6172 			return false;
6173 		return true;
6174 	}
6175 
6176 	return false;
6177 }
6178 
/* Load Long Term Keys command handler.
 *
 * Replaces the stored LTKs with the supplied list. Entries are
 * validated up front; blocked keys, debug keys and keys with an
 * unknown type are skipped rather than failing the command.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound that keeps the total payload within a u16 length */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Validate every entry before clearing the stored keys */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* Debug keys are deliberately not stored: the
			 * fallthrough into the default case skips this
			 * entry, so the assignments above have no effect.
			 */
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6274 
6275 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
6276 {
6277 	struct hci_conn *conn = cmd->user_data;
6278 	struct mgmt_rp_get_conn_info rp;
6279 	int err;
6280 
6281 	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
6282 
6283 	if (status == MGMT_STATUS_SUCCESS) {
6284 		rp.rssi = conn->rssi;
6285 		rp.tx_power = conn->tx_power;
6286 		rp.max_tx_power = conn->max_tx_power;
6287 	} else {
6288 		rp.rssi = HCI_RSSI_INVALID;
6289 		rp.tx_power = HCI_TX_POWER_INVALID;
6290 		rp.max_tx_power = HCI_TX_POWER_INVALID;
6291 	}
6292 
6293 	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
6294 				status, &rp, sizeof(rp));
6295 
6296 	hci_conn_drop(conn);
6297 	hci_conn_put(conn);
6298 
6299 	return err;
6300 }
6301 
/* Completion callback for the Read RSSI / Read TX Power request built
 * in get_conn_info(). Resolves the connection from the last sent
 * command's handle and finishes the matching pending command.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	bt_dev_dbg(hdev, "status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle.  Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	if (!cp) {
		bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
			   handle);
		goto unlock;
	}

	/* The pending command was registered with the connection as data */
	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6355 
/* Handle the Get Connection Information management command
 * (MGMT_OP_GET_CONN_INFO).
 *
 * Replies with RSSI, TX power and max TX power for an existing
 * connection. Values cached in the hci_conn are reused while they are
 * younger than a randomized age between hdev->conn_info_min_age and
 * hdev->conn_info_max_age; otherwise HCI Read RSSI / Read TX Power
 * commands are issued and the reply is deferred to the request's
 * completion path (conn_info_refresh_complete).
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pre-fill the reply address so every error path can echo it back */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* BR/EDR and LE connections are looked up by link type */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one Get Conn Info may be pending per connection */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;	/* 0x00 = current TX power */
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;	/* 0x01 = maximum TX power */
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Take connection references for the pending command; they
		 * are released when the command completes (see
		 * conn_info_cmd_complete).
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6476 
6477 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
6478 {
6479 	struct hci_conn *conn = cmd->user_data;
6480 	struct mgmt_rp_get_clock_info rp;
6481 	struct hci_dev *hdev;
6482 	int err;
6483 
6484 	memset(&rp, 0, sizeof(rp));
6485 	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
6486 
6487 	if (status)
6488 		goto complete;
6489 
6490 	hdev = hci_dev_get(cmd->index);
6491 	if (hdev) {
6492 		rp.local_clock = cpu_to_le32(hdev->clock);
6493 		hci_dev_put(hdev);
6494 	}
6495 
6496 	if (conn) {
6497 		rp.piconet_clock = cpu_to_le32(conn->clock);
6498 		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
6499 	}
6500 
6501 complete:
6502 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
6503 				sizeof(rp));
6504 
6505 	if (conn) {
6506 		hci_conn_drop(conn);
6507 		hci_conn_put(conn);
6508 	}
6509 
6510 	return err;
6511 }
6512 
6513 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6514 {
6515 	struct hci_cp_read_clock *hci_cp;
6516 	struct mgmt_pending_cmd *cmd;
6517 	struct hci_conn *conn;
6518 
6519 	bt_dev_dbg(hdev, "status %u", status);
6520 
6521 	hci_dev_lock(hdev);
6522 
6523 	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
6524 	if (!hci_cp)
6525 		goto unlock;
6526 
6527 	if (hci_cp->which) {
6528 		u16 handle = __le16_to_cpu(hci_cp->handle);
6529 		conn = hci_conn_hash_lookup_handle(hdev, handle);
6530 	} else {
6531 		conn = NULL;
6532 	}
6533 
6534 	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
6535 	if (!cmd)
6536 		goto unlock;
6537 
6538 	cmd->cmd_complete(cmd, mgmt_status(status));
6539 	mgmt_pending_remove(cmd);
6540 
6541 unlock:
6542 	hci_dev_unlock(hdev);
6543 }
6544 
/* Handle the Get Clock Info management command (MGMT_OP_GET_CLOCK_INFO).
 *
 * Queues HCI Read Clock for the local clock and, when a peer address is
 * given, a second Read Clock for that connection's piconet clock. The
 * reply is deferred to get_clock_info_complete() /
 * clock_info_cmd_complete(). Only BR/EDR addresses are accepted.
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pre-fill the reply address so error paths can echo it back */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* BDADDR_ANY means only the local clock is requested; otherwise
	 * the named peer must be currently connected.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First read: local clock (which = 0x00 after memset) */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* References released in clock_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6620 
6621 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6622 {
6623 	struct hci_conn *conn;
6624 
6625 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6626 	if (!conn)
6627 		return false;
6628 
6629 	if (conn->dst_type != type)
6630 		return false;
6631 
6632 	if (conn->state != BT_CONNECTED)
6633 		return false;
6634 
6635 	return true;
6636 }
6637 
/* This function requires the caller holds hdev->lock */
/* Create (or reuse) the connection parameters entry for @addr/@addr_type
 * and move it onto the action list matching the requested auto-connect
 * policy. Returns 0 on success, -EIO if the params entry could not be
 * created.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do when the policy is unchanged */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever action list the entry is currently on
	 * before re-adding it below; list_del_init keeps the node valid
	 * for the cases that do not re-add it.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a background connect when not already
		 * connected to the device.
		 */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
6682 
6683 static void device_added(struct sock *sk, struct hci_dev *hdev,
6684 			 bdaddr_t *bdaddr, u8 type, u8 action)
6685 {
6686 	struct mgmt_ev_device_added ev;
6687 
6688 	bacpy(&ev.addr.bdaddr, bdaddr);
6689 	ev.addr.type = type;
6690 	ev.action = action;
6691 
6692 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
6693 }
6694 
/* Handle the Add Device management command (MGMT_OP_ADD_DEVICE).
 *
 * For BR/EDR addresses only action 0x01 (allow incoming connections) is
 * supported and the device is put on the whitelist. For LE addresses the
 * action selects the auto-connect policy:
 *   0x00 -> HCI_AUTO_CONN_REPORT, 0x01 -> DIRECT, 0x02 -> ALWAYS.
 * On success a Device Added event and a Device Flags Changed event are
 * emitted.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->whitelist,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		/* Pick up the flags of the (possibly just created) entry
		 * for the Device Flags Changed event below.
		 */
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->current_flags;
	}

	hci_update_background_scan(hdev);

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     SUPPORTED_DEVICE_FLAGS(), current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6792 
6793 static void device_removed(struct sock *sk, struct hci_dev *hdev,
6794 			   bdaddr_t *bdaddr, u8 type)
6795 {
6796 	struct mgmt_ev_device_removed ev;
6797 
6798 	bacpy(&ev.addr.bdaddr, bdaddr);
6799 	ev.addr.type = type;
6800 
6801 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
6802 }
6803 
/* Handle the Remove Device management command (MGMT_OP_REMOVE_DEVICE).
 *
 * With a specific address, removes that device from the whitelist
 * (BR/EDR) or deletes its LE connection parameters. With BDADDR_ANY,
 * clears the whole whitelist and all user-added LE connection
 * parameters. A Device Removed event is emitted per removed entry.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries not added via Add Device (disabled or explicit
		 * connect) cannot be removed with this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Unlink from action and params lists before freeing */
		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* BDADDR_ANY requires address type 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries with an ongoing explicit connect,
			 * but downgrade them so they are cleaned up later.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
6932 
/* Handle the Load Connection Parameters management command
 * (MGMT_OP_LOAD_CONN_PARAM).
 *
 * Validates the parameter count against the message length, drops all
 * disabled entries and loads the supplied per-device LE connection
 * parameters. Individual invalid entries are logged and skipped rather
 * than failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound that still fits in a 16-bit message length */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The message length must exactly match the declared count */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		/* Map the mgmt address type to the internal LE type */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
7017 
/* Handle the Set External Configuration management command
 * (MGMT_OP_SET_EXTERNAL_CONFIG).
 *
 * Toggles the HCI_EXT_CONFIGURED flag on controllers with the
 * HCI_QUIRK_EXTERNAL_CONFIG quirk. When the change flips the
 * configured/unconfigured state, the controller is re-announced on the
 * other index list (and powered on for auto-off handling when it
 * becomes configured).
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Track whether the flag actually flipped */
	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the configured state changed, move the controller between
	 * the configured and unconfigured index lists.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7073 
/* Handle the Set Public Address management command
 * (MGMT_OP_SET_PUBLIC_ADDRESS).
 *
 * Stores the address to be programmed via the driver's set_bdaddr hook.
 * Only allowed while powered off; if the controller thereby becomes
 * fully configured it is moved to the configured index list and powered
 * on for the auto-off handling.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Requires driver support for programming the address */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7125 
/* Completion handler for the HCI Read Local OOB (Extended) Data request
 * issued by read_local_ssp_oob_req().
 *
 * Builds the EIR-encoded OOB reply (class of device plus P-192 and/or
 * P-256 hash/randomizer pairs depending on controller capabilities),
 * completes the pending MGMT_OP_READ_LOCAL_OOB_EXT_DATA command and
 * broadcasts a Local OOB Data Updated event to interested sockets.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

	bt_dev_dbg(hdev, "status %u", status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	if (status) {
		/* Controller failure: reply with empty EIR data */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy variant: P-192 hash/randomizer only */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* class(5) + hash(18) + rand(18) EIR fields */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended variant: P-192 and P-256 pairs */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				/* SC-only: omit the P-192 values */
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* On failure skip EIR encoding and send just the status */
	if (status)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
7236 
7237 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7238 				  struct mgmt_cp_read_local_oob_ext_data *cp)
7239 {
7240 	struct mgmt_pending_cmd *cmd;
7241 	struct hci_request req;
7242 	int err;
7243 
7244 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7245 			       cp, sizeof(*cp));
7246 	if (!cmd)
7247 		return -ENOMEM;
7248 
7249 	hci_req_init(&req, hdev);
7250 
7251 	if (bredr_sc_enabled(hdev))
7252 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
7253 	else
7254 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
7255 
7256 	err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
7257 	if (err < 0) {
7258 		mgmt_pending_remove(cmd);
7259 		return err;
7260 	}
7261 
7262 	return 0;
7263 }
7264 
/* Handle the Read Local Out Of Band Extended Data management command
 * (MGMT_OP_READ_LOCAL_OOB_EXT_DATA).
 *
 * For BR/EDR with SSP enabled the data must come from the controller,
 * so the work is handed off to read_local_ssp_oob_req() and the reply
 * is deferred. For LE the EIR data (address, role, optional SC confirm/
 * random values and flags) is assembled locally and replied immediately.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* First pass: validate the requested transport and compute the
	 * worst-case EIR length for the reply allocation.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: actually assemble the EIR data */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* OOB data must be read from the controller;
			 * reply is deferred to the request completion.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick static (0x01) or public (0x00) address for the
		 * EIR LE Bluetooth Device Address field.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* 0x02 = peripheral preferred, 0x01 = central preferred */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
7420 
7421 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7422 {
7423 	u32 flags = 0;
7424 
7425 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
7426 	flags |= MGMT_ADV_FLAG_DISCOV;
7427 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7428 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7429 	flags |= MGMT_ADV_FLAG_APPEARANCE;
7430 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7431 	flags |= MGMT_ADV_PARAM_DURATION;
7432 	flags |= MGMT_ADV_PARAM_TIMEOUT;
7433 	flags |= MGMT_ADV_PARAM_INTERVALS;
7434 	flags |= MGMT_ADV_PARAM_TX_POWER;
7435 
7436 	/* In extended adv TX_POWER returned from Set Adv Param
7437 	 * will be always valid.
7438 	 */
7439 	if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7440 	    ext_adv_capable(hdev))
7441 		flags |= MGMT_ADV_FLAG_TX_POWER;
7442 
7443 	if (ext_adv_capable(hdev)) {
7444 		flags |= MGMT_ADV_FLAG_SEC_1M;
7445 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7446 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
7447 
7448 		if (hdev->le_features[1] & HCI_LE_PHY_2M)
7449 			flags |= MGMT_ADV_FLAG_SEC_2M;
7450 
7451 		if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7452 			flags |= MGMT_ADV_FLAG_SEC_CODED;
7453 	}
7454 
7455 	return flags;
7456 }
7457 
7458 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
7459 			     void *data, u16 data_len)
7460 {
7461 	struct mgmt_rp_read_adv_features *rp;
7462 	size_t rp_len;
7463 	int err;
7464 	struct adv_info *adv_instance;
7465 	u32 supported_flags;
7466 	u8 *instance;
7467 
7468 	bt_dev_dbg(hdev, "sock %p", sk);
7469 
7470 	if (!lmp_le_capable(hdev))
7471 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7472 				       MGMT_STATUS_REJECTED);
7473 
7474 	/* Enabling the experimental LL Privay support disables support for
7475 	 * advertising.
7476 	 */
7477 	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
7478 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
7479 				       MGMT_STATUS_NOT_SUPPORTED);
7480 
7481 	hci_dev_lock(hdev);
7482 
7483 	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
7484 	rp = kmalloc(rp_len, GFP_ATOMIC);
7485 	if (!rp) {
7486 		hci_dev_unlock(hdev);
7487 		return -ENOMEM;
7488 	}
7489 
7490 	supported_flags = get_supported_adv_flags(hdev);
7491 
7492 	rp->supported_flags = cpu_to_le32(supported_flags);
7493 	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
7494 	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
7495 	rp->max_instances = hdev->le_num_of_adv_sets;
7496 	rp->num_instances = hdev->adv_instance_cnt;
7497 
7498 	instance = rp->instance;
7499 	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
7500 		*instance = adv_instance->instance;
7501 		instance++;
7502 	}
7503 
7504 	hci_dev_unlock(hdev);
7505 
7506 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7507 				MGMT_STATUS_SUCCESS, rp, rp_len);
7508 
7509 	kfree(rp);
7510 
7511 	return err;
7512 }
7513 
7514 static u8 calculate_name_len(struct hci_dev *hdev)
7515 {
7516 	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7517 
7518 	return append_local_name(hdev, buf, 0);
7519 }
7520 
7521 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7522 			   bool is_adv_data)
7523 {
7524 	u8 max_len = HCI_MAX_AD_LENGTH;
7525 
7526 	if (is_adv_data) {
7527 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7528 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
7529 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
7530 			max_len -= 3;
7531 
7532 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7533 			max_len -= 3;
7534 	} else {
7535 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7536 			max_len -= calculate_name_len(hdev);
7537 
7538 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
7539 			max_len -= 4;
7540 	}
7541 
7542 	return max_len;
7543 }
7544 
7545 static bool flags_managed(u32 adv_flags)
7546 {
7547 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7548 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
7549 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
7550 }
7551 
7552 static bool tx_power_managed(u32 adv_flags)
7553 {
7554 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
7555 }
7556 
7557 static bool name_managed(u32 adv_flags)
7558 {
7559 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
7560 }
7561 
7562 static bool appearance_managed(u32 adv_flags)
7563 {
7564 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
7565 }
7566 
7567 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
7568 			      u8 len, bool is_adv_data)
7569 {
7570 	int i, cur_len;
7571 	u8 max_len;
7572 
7573 	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
7574 
7575 	if (len > max_len)
7576 		return false;
7577 
7578 	/* Make sure that the data is correctly formatted. */
7579 	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
7580 		cur_len = data[i];
7581 
7582 		if (data[i + 1] == EIR_FLAGS &&
7583 		    (!is_adv_data || flags_managed(adv_flags)))
7584 			return false;
7585 
7586 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
7587 			return false;
7588 
7589 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
7590 			return false;
7591 
7592 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
7593 			return false;
7594 
7595 		if (data[i + 1] == EIR_APPEARANCE &&
7596 		    appearance_managed(adv_flags))
7597 			return false;
7598 
7599 		/* If the current field length would exceed the total data
7600 		 * length, then it's invalid.
7601 		 */
7602 		if (i + cur_len >= len)
7603 			return false;
7604 	}
7605 
7606 	return true;
7607 }
7608 
7609 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
7610 {
7611 	u32 supported_flags, phy_flags;
7612 
7613 	/* The current implementation only supports a subset of the specified
7614 	 * flags. Also need to check mutual exclusiveness of sec flags.
7615 	 */
7616 	supported_flags = get_supported_adv_flags(hdev);
7617 	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
7618 	if (adv_flags & ~supported_flags ||
7619 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
7620 		return false;
7621 
7622 	return true;
7623 }
7624 
7625 static bool adv_busy(struct hci_dev *hdev)
7626 {
7627 	return (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
7628 		pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
7629 		pending_find(MGMT_OP_SET_LE, hdev) ||
7630 		pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev) ||
7631 		pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev));
7632 }
7633 
/* Completion handler for the HCI request issued by add_advertising() or
 * add_ext_adv_data().
 *
 * On success every pending (not yet committed) instance is committed; on
 * failure pending instances are removed again and userspace is notified of
 * their removal. Finally the originating mgmt command, if one is still
 * pending, is answered with the mapped status.
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_advertising *cp;
	struct mgmt_rp_add_advertising rp;
	struct adv_info *adv_instance, *n;
	u8 instance;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	/* This handler serves both the legacy and the extended interface;
	 * look for whichever command is pending.
	 */
	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
	if (!cmd)
		cmd = pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev);

	/* _safe iteration: failed instances are removed from the list. */
	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		if (!adv_instance->pending)
			continue;

		if (!status) {
			/* Request succeeded: commit the instance. */
			adv_instance->pending = false;
			continue;
		}

		instance = adv_instance->instance;

		/* Stop the rotation timer if it is driving the instance
		 * that is being dropped.
		 */
		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
	}

	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	if (status)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
7687 
7688 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
7689 			   void *data, u16 data_len)
7690 {
7691 	struct mgmt_cp_add_advertising *cp = data;
7692 	struct mgmt_rp_add_advertising rp;
7693 	u32 flags;
7694 	u8 status;
7695 	u16 timeout, duration;
7696 	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
7697 	u8 schedule_instance = 0;
7698 	struct adv_info *next_instance;
7699 	int err;
7700 	struct mgmt_pending_cmd *cmd;
7701 	struct hci_request req;
7702 
7703 	bt_dev_dbg(hdev, "sock %p", sk);
7704 
7705 	status = mgmt_le_support(hdev);
7706 	if (status)
7707 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7708 				       status);
7709 
7710 	/* Enabling the experimental LL Privay support disables support for
7711 	 * advertising.
7712 	 */
7713 	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
7714 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
7715 				       MGMT_STATUS_NOT_SUPPORTED);
7716 
7717 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
7718 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7719 				       MGMT_STATUS_INVALID_PARAMS);
7720 
7721 	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
7722 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7723 				       MGMT_STATUS_INVALID_PARAMS);
7724 
7725 	flags = __le32_to_cpu(cp->flags);
7726 	timeout = __le16_to_cpu(cp->timeout);
7727 	duration = __le16_to_cpu(cp->duration);
7728 
7729 	if (!requested_adv_flags_are_valid(hdev, flags))
7730 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7731 				       MGMT_STATUS_INVALID_PARAMS);
7732 
7733 	hci_dev_lock(hdev);
7734 
7735 	if (timeout && !hdev_is_powered(hdev)) {
7736 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7737 				      MGMT_STATUS_REJECTED);
7738 		goto unlock;
7739 	}
7740 
7741 	if (adv_busy(hdev)) {
7742 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7743 				      MGMT_STATUS_BUSY);
7744 		goto unlock;
7745 	}
7746 
7747 	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
7748 	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
7749 			       cp->scan_rsp_len, false)) {
7750 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7751 				      MGMT_STATUS_INVALID_PARAMS);
7752 		goto unlock;
7753 	}
7754 
7755 	err = hci_add_adv_instance(hdev, cp->instance, flags,
7756 				   cp->adv_data_len, cp->data,
7757 				   cp->scan_rsp_len,
7758 				   cp->data + cp->adv_data_len,
7759 				   timeout, duration,
7760 				   HCI_ADV_TX_POWER_NO_PREFERENCE,
7761 				   hdev->le_adv_min_interval,
7762 				   hdev->le_adv_max_interval);
7763 	if (err < 0) {
7764 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7765 				      MGMT_STATUS_FAILED);
7766 		goto unlock;
7767 	}
7768 
7769 	/* Only trigger an advertising added event if a new instance was
7770 	 * actually added.
7771 	 */
7772 	if (hdev->adv_instance_cnt > prev_instance_cnt)
7773 		mgmt_advertising_added(sk, hdev, cp->instance);
7774 
7775 	if (hdev->cur_adv_instance == cp->instance) {
7776 		/* If the currently advertised instance is being changed then
7777 		 * cancel the current advertising and schedule the next
7778 		 * instance. If there is only one instance then the overridden
7779 		 * advertising data will be visible right away.
7780 		 */
7781 		cancel_adv_timeout(hdev);
7782 
7783 		next_instance = hci_get_next_instance(hdev, cp->instance);
7784 		if (next_instance)
7785 			schedule_instance = next_instance->instance;
7786 	} else if (!hdev->adv_instance_timeout) {
7787 		/* Immediately advertise the new instance if no other
7788 		 * instance is currently being advertised.
7789 		 */
7790 		schedule_instance = cp->instance;
7791 	}
7792 
7793 	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
7794 	 * there is no instance to be advertised then we have no HCI
7795 	 * communication to make. Simply return.
7796 	 */
7797 	if (!hdev_is_powered(hdev) ||
7798 	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
7799 	    !schedule_instance) {
7800 		rp.instance = cp->instance;
7801 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7802 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7803 		goto unlock;
7804 	}
7805 
7806 	/* We're good to go, update advertising data, parameters, and start
7807 	 * advertising.
7808 	 */
7809 	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
7810 			       data_len);
7811 	if (!cmd) {
7812 		err = -ENOMEM;
7813 		goto unlock;
7814 	}
7815 
7816 	hci_req_init(&req, hdev);
7817 
7818 	err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
7819 
7820 	if (!err)
7821 		err = hci_req_run(&req, add_advertising_complete);
7822 
7823 	if (err < 0) {
7824 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7825 				      MGMT_STATUS_FAILED);
7826 		mgmt_pending_remove(cmd);
7827 	}
7828 
7829 unlock:
7830 	hci_dev_unlock(hdev);
7831 
7832 	return err;
7833 }
7834 
/* Completion handler for the HCI request issued by add_ext_adv_params().
 *
 * On success the command is answered with the selected TX power and the
 * data-size budget for the chosen flags; on failure the freshly created
 * (or updated) instance is removed again before reporting the error.
 */
static void add_ext_adv_params_complete(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_ext_adv_params *cp;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv_instance;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;
	adv_instance = hci_find_adv_instance(hdev, cp->instance);
	if (!adv_instance)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv_instance->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (status) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv_instance->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));

	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));
	}

unlock:
	/* The pending command is removed even on the early-exit paths. */
	if (cmd)
		mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}
7891 
7892 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
7893 			      void *data, u16 data_len)
7894 {
7895 	struct mgmt_cp_add_ext_adv_params *cp = data;
7896 	struct mgmt_rp_add_ext_adv_params rp;
7897 	struct mgmt_pending_cmd *cmd = NULL;
7898 	struct adv_info *adv_instance;
7899 	struct hci_request req;
7900 	u32 flags, min_interval, max_interval;
7901 	u16 timeout, duration;
7902 	u8 status;
7903 	s8 tx_power;
7904 	int err;
7905 
7906 	BT_DBG("%s", hdev->name);
7907 
7908 	status = mgmt_le_support(hdev);
7909 	if (status)
7910 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7911 				       status);
7912 
7913 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
7914 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7915 				       MGMT_STATUS_INVALID_PARAMS);
7916 
7917 	/* The purpose of breaking add_advertising into two separate MGMT calls
7918 	 * for params and data is to allow more parameters to be added to this
7919 	 * structure in the future. For this reason, we verify that we have the
7920 	 * bare minimum structure we know of when the interface was defined. Any
7921 	 * extra parameters we don't know about will be ignored in this request.
7922 	 */
7923 	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
7924 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7925 				       MGMT_STATUS_INVALID_PARAMS);
7926 
7927 	flags = __le32_to_cpu(cp->flags);
7928 
7929 	if (!requested_adv_flags_are_valid(hdev, flags))
7930 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7931 				       MGMT_STATUS_INVALID_PARAMS);
7932 
7933 	hci_dev_lock(hdev);
7934 
7935 	/* In new interface, we require that we are powered to register */
7936 	if (!hdev_is_powered(hdev)) {
7937 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7938 				      MGMT_STATUS_REJECTED);
7939 		goto unlock;
7940 	}
7941 
7942 	if (adv_busy(hdev)) {
7943 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7944 				      MGMT_STATUS_BUSY);
7945 		goto unlock;
7946 	}
7947 
7948 	/* Parse defined parameters from request, use defaults otherwise */
7949 	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
7950 		  __le16_to_cpu(cp->timeout) : 0;
7951 
7952 	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
7953 		   __le16_to_cpu(cp->duration) :
7954 		   hdev->def_multi_adv_rotation_duration;
7955 
7956 	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
7957 		       __le32_to_cpu(cp->min_interval) :
7958 		       hdev->le_adv_min_interval;
7959 
7960 	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
7961 		       __le32_to_cpu(cp->max_interval) :
7962 		       hdev->le_adv_max_interval;
7963 
7964 	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
7965 		   cp->tx_power :
7966 		   HCI_ADV_TX_POWER_NO_PREFERENCE;
7967 
7968 	/* Create advertising instance with no advertising or response data */
7969 	err = hci_add_adv_instance(hdev, cp->instance, flags,
7970 				   0, NULL, 0, NULL, timeout, duration,
7971 				   tx_power, min_interval, max_interval);
7972 
7973 	if (err < 0) {
7974 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7975 				      MGMT_STATUS_FAILED);
7976 		goto unlock;
7977 	}
7978 
7979 	hdev->cur_adv_instance = cp->instance;
7980 	/* Submit request for advertising params if ext adv available */
7981 	if (ext_adv_capable(hdev)) {
7982 		hci_req_init(&req, hdev);
7983 		adv_instance = hci_find_adv_instance(hdev, cp->instance);
7984 
7985 		/* Updating parameters of an active instance will return a
7986 		 * Command Disallowed error, so we must first disable the
7987 		 * instance if it is active.
7988 		 */
7989 		if (!adv_instance->pending)
7990 			__hci_req_disable_ext_adv_instance(&req, cp->instance);
7991 
7992 		__hci_req_setup_ext_adv_instance(&req, cp->instance);
7993 
7994 		err = hci_req_run(&req, add_ext_adv_params_complete);
7995 
7996 		if (!err)
7997 			cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_PARAMS,
7998 					       hdev, data, data_len);
7999 		if (!cmd) {
8000 			err = -ENOMEM;
8001 			hci_remove_adv_instance(hdev, cp->instance);
8002 			goto unlock;
8003 		}
8004 
8005 	} else {
8006 		rp.instance = cp->instance;
8007 		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8008 		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8009 		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8010 		err = mgmt_cmd_complete(sk, hdev->id,
8011 					MGMT_OP_ADD_EXT_ADV_PARAMS,
8012 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8013 	}
8014 
8015 unlock:
8016 	hci_dev_unlock(hdev);
8017 
8018 	return err;
8019 }
8020 
/* Handle the Add Extended Advertising Data mgmt command: attach adv and
 * scan response data to an instance previously created by
 * add_ext_adv_params(), then start or reschedule advertising.
 *
 * On any validation or setup failure the instance is removed again
 * (clear_new_instance), since the two-command sequence is treated as one
 * logical registration.
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance must have been created by add_ext_adv_params(). */
	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */

	hci_req_init(&req, hdev);

	hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	if (ext_adv_capable(hdev)) {
		__hci_req_update_adv_data(&req, cp->instance);
		__hci_req_update_scan_rsp_data(&req, cp->instance);
		__hci_req_enable_ext_advertising(&req, cp->instance);

	} else {
		/* If using software rotation, determine next instance to use */

		if (hdev->cur_adv_instance == cp->instance) {
			/* If the currently advertised instance is being changed
			 * then cancel the current advertising and schedule the
			 * next instance. If there is only one instance then the
			 * overridden advertising data will be visible right
			 * away
			 */
			cancel_adv_timeout(hdev);

			next_instance = hci_get_next_instance(hdev,
							      cp->instance);
			if (next_instance)
				schedule_instance = next_instance->instance;
		} else if (!hdev->adv_instance_timeout) {
			/* Immediately advertise the new instance if no other
			 * instance is currently being advertised.
			 */
			schedule_instance = cp->instance;
		}

		/* If the HCI_ADVERTISING flag is set or there is no instance to
		 * be advertised then we have no HCI communication to make.
		 * Simply return.
		 *
		 * NOTE(review): on this early-success path the command
		 * already queued on req (HCI_OP_READ_LOCAL_NAME) is never
		 * run nor purged — verify req.cmd_q cannot leak here.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    !schedule_instance) {
			if (adv_instance->pending) {
				mgmt_advertising_added(sk, hdev, cp->instance);
				adv_instance->pending = false;
			}
			rp.instance = cp->instance;
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_EXT_ADV_DATA,
						MGMT_STATUS_SUCCESS, &rp,
						sizeof(rp));
			goto unlock;
		}

		err = __hci_req_schedule_adv_instance(&req, schedule_instance,
						      true);
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	if (!err)
		err = hci_req_run(&req, add_advertising_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8166 
8167 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
8168 					u16 opcode)
8169 {
8170 	struct mgmt_pending_cmd *cmd;
8171 	struct mgmt_cp_remove_advertising *cp;
8172 	struct mgmt_rp_remove_advertising rp;
8173 
8174 	bt_dev_dbg(hdev, "status %d", status);
8175 
8176 	hci_dev_lock(hdev);
8177 
8178 	/* A failure status here only means that we failed to disable
8179 	 * advertising. Otherwise, the advertising instance has been removed,
8180 	 * so report success.
8181 	 */
8182 	cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
8183 	if (!cmd)
8184 		goto unlock;
8185 
8186 	cp = cmd->param;
8187 	rp.instance = cp->instance;
8188 
8189 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
8190 			  &rp, sizeof(rp));
8191 	mgmt_pending_remove(cmd);
8192 
8193 unlock:
8194 	hci_dev_unlock(hdev);
8195 }
8196 
8197 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
8198 			      void *data, u16 data_len)
8199 {
8200 	struct mgmt_cp_remove_advertising *cp = data;
8201 	struct mgmt_rp_remove_advertising rp;
8202 	struct mgmt_pending_cmd *cmd;
8203 	struct hci_request req;
8204 	int err;
8205 
8206 	bt_dev_dbg(hdev, "sock %p", sk);
8207 
8208 	/* Enabling the experimental LL Privay support disables support for
8209 	 * advertising.
8210 	 */
8211 	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
8212 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
8213 				       MGMT_STATUS_NOT_SUPPORTED);
8214 
8215 	hci_dev_lock(hdev);
8216 
8217 	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
8218 		err = mgmt_cmd_status(sk, hdev->id,
8219 				      MGMT_OP_REMOVE_ADVERTISING,
8220 				      MGMT_STATUS_INVALID_PARAMS);
8221 		goto unlock;
8222 	}
8223 
8224 	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
8225 	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
8226 	    pending_find(MGMT_OP_SET_LE, hdev)) {
8227 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8228 				      MGMT_STATUS_BUSY);
8229 		goto unlock;
8230 	}
8231 
8232 	if (list_empty(&hdev->adv_instances)) {
8233 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8234 				      MGMT_STATUS_INVALID_PARAMS);
8235 		goto unlock;
8236 	}
8237 
8238 	hci_req_init(&req, hdev);
8239 
8240 	/* If we use extended advertising, instance is disabled and removed */
8241 	if (ext_adv_capable(hdev)) {
8242 		__hci_req_disable_ext_adv_instance(&req, cp->instance);
8243 		__hci_req_remove_ext_adv_instance(&req, cp->instance);
8244 	}
8245 
8246 	hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
8247 
8248 	if (list_empty(&hdev->adv_instances))
8249 		__hci_req_disable_advertising(&req);
8250 
8251 	/* If no HCI commands have been collected so far or the HCI_ADVERTISING
8252 	 * flag is set or the device isn't powered then we have no HCI
8253 	 * communication to make. Simply return.
8254 	 */
8255 	if (skb_queue_empty(&req.cmd_q) ||
8256 	    !hdev_is_powered(hdev) ||
8257 	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
8258 		hci_req_purge(&req);
8259 		rp.instance = cp->instance;
8260 		err = mgmt_cmd_complete(sk, hdev->id,
8261 					MGMT_OP_REMOVE_ADVERTISING,
8262 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8263 		goto unlock;
8264 	}
8265 
8266 	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
8267 			       data_len);
8268 	if (!cmd) {
8269 		err = -ENOMEM;
8270 		goto unlock;
8271 	}
8272 
8273 	err = hci_req_run(&req, remove_advertising_complete);
8274 	if (err < 0)
8275 		mgmt_pending_remove(cmd);
8276 
8277 unlock:
8278 	hci_dev_unlock(hdev);
8279 
8280 	return err;
8281 }
8282 
8283 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
8284 			     void *data, u16 data_len)
8285 {
8286 	struct mgmt_cp_get_adv_size_info *cp = data;
8287 	struct mgmt_rp_get_adv_size_info rp;
8288 	u32 flags, supported_flags;
8289 	int err;
8290 
8291 	bt_dev_dbg(hdev, "sock %p", sk);
8292 
8293 	if (!lmp_le_capable(hdev))
8294 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8295 				       MGMT_STATUS_REJECTED);
8296 
8297 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8298 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8299 				       MGMT_STATUS_INVALID_PARAMS);
8300 
8301 	flags = __le32_to_cpu(cp->flags);
8302 
8303 	/* The current implementation only supports a subset of the specified
8304 	 * flags.
8305 	 */
8306 	supported_flags = get_supported_adv_flags(hdev);
8307 	if (flags & ~supported_flags)
8308 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8309 				       MGMT_STATUS_INVALID_PARAMS);
8310 
8311 	rp.instance = cp->instance;
8312 	rp.flags = cp->flags;
8313 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8314 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8315 
8316 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8317 				MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8318 
8319 	return err;
8320 }
8321 
/* Dispatch table for mgmt commands, indexed by command opcode. Each entry
 * names the handler, the expected parameter size (the minimum size when
 * HCI_MGMT_VAR_LEN is set) and flags: HCI_MGMT_NO_HDEV for commands that
 * take no controller index, HCI_MGMT_UNTRUSTED for commands permitted on
 * untrusted sockets, and HCI_MGMT_UNCONFIGURED for commands allowed on
 * not-yet-configured controllers.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
};
8449 
/* Announce a newly registered controller index to mgmt listeners.
 *
 * Legacy listeners receive INDEX_ADDED (or UNCONF_INDEX_ADDED while the
 * controller is still unconfigured); extended listeners always receive
 * EXT_INDEX_ADDED carrying the index type and the transport bus.
 */
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	/* Raw (vendor/test) devices are not exposed through mgmt */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01; /* unconfigured controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00; /* configured controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02; /* AMP controller */
		break;
	default:
		/* Unknown device types get no announcement at all */
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
8481 
/* Announce removal of a controller index to mgmt listeners.
 *
 * Mirror of mgmt_index_added(): legacy listeners get INDEX_REMOVED (or
 * the unconfigured variant), extended listeners get EXT_INDEX_REMOVED.
 * For primary controllers any still-pending mgmt commands are completed
 * first with INVALID_INDEX since the index is going away.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* Opcode 0 matches every pending command for this hdev */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01; /* unconfigured controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00; /* configured controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02; /* AMP controller */
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
8516 
/* This function requires the caller holds hdev->lock.
 *
 * Re-sort every stored LE connection parameter entry onto the pending
 * action list matching its auto_connect policy. Used on power-on so that
 * auto-connections and passive-scan reports resume after the controller
 * comes (back) up.
 */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		list_del_init(&p->action);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_add(&p->action, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			list_add(&p->action, &hdev->pend_le_reports);
			break;
		default:
			/* Disabled/link-loss policies stay off both lists */
			break;
		}
	}
}
8541 
/* Finalize a power-on attempt: on success re-arm LE auto-connect actions
 * and background scanning, then complete any pending Set Powered commands
 * and broadcast the new settings. err is the HCI-level result (0 = ok).
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_background_scan(hdev);
	}

	/* Answer every pending Set Powered command; match.sk collects the
	 * socket to skip when broadcasting New Settings below.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
8564 
/* Handle the controller having been powered off: complete pending mgmt
 * commands with an appropriate error, report a zeroed Class of Device if
 * one was set, and broadcast the new settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	/* Opcode 0 matches every remaining pending command */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Only announce the Class of Device reset if it was non-zero */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
8598 
8599 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8600 {
8601 	struct mgmt_pending_cmd *cmd;
8602 	u8 status;
8603 
8604 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8605 	if (!cmd)
8606 		return;
8607 
8608 	if (err == -ERFKILL)
8609 		status = MGMT_STATUS_RFKILLED;
8610 	else
8611 		status = MGMT_STATUS_FAILED;
8612 
8613 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8614 
8615 	mgmt_pending_remove(cmd);
8616 }
8617 
/* Emit a New Link Key event for a BR/EDR link key.
 *
 * @persistent: store hint for userspace — whether the key should be kept
 * across power cycles. The event struct is zeroed first so no stack data
 * leaks to userspace through padding.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
8634 
8635 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8636 {
8637 	switch (ltk->type) {
8638 	case SMP_LTK:
8639 	case SMP_LTK_SLAVE:
8640 		if (ltk->authenticated)
8641 			return MGMT_LTK_AUTHENTICATED;
8642 		return MGMT_LTK_UNAUTHENTICATED;
8643 	case SMP_LTK_P256:
8644 		if (ltk->authenticated)
8645 			return MGMT_LTK_P256_AUTH;
8646 		return MGMT_LTK_P256_UNAUTH;
8647 	case SMP_LTK_P256_DEBUG:
8648 		return MGMT_LTK_P256_DEBUG;
8649 	}
8650 
8651 	return MGMT_LTK_UNAUTHENTICATED;
8652 }
8653 
/* Emit a New Long Term Key event for an LE LTK.
 *
 * The store hint is forced to 0 for non-identity random addresses (see
 * comment below); only the significant enc_size bytes of the key value
 * are copied, with the remainder explicitly zeroed.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* Master-initiated legacy LTK */
	if (key->type == SMP_LTK)
		ev.key.master = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
8696 
/* Emit a New IRK event carrying both the current RPA and the identity
 * resolving key. @persistent is passed straight through as the userspace
 * store hint.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	/* Zero first so no stack data leaks to userspace via padding */
	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
8712 
/* Emit a New CSRK (signature resolving key) event. Like mgmt_new_ltk(),
 * the store hint is suppressed for non-identity random addresses.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
8742 
/* Emit a New Connection Parameter event for an LE peer.
 *
 * Only identity addresses are reported — parameters tied to a resolvable
 * private address would be useless to userspace once the address rotates.
 * All interval/latency/timeout values are converted to little endian for
 * the wire format.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
8763 
/* Emit a Device Connected event, attaching EIR data: either the LE
 * advertising data seen from the peer, or (for BR/EDR) the remote name
 * and class of device.
 *
 * NOTE(review): buf is a fixed 512 bytes and the copies below are not
 * explicitly bounded against it here — presumably name_len and
 * le_adv_data_len are capped by the callers; verify at the call sites.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only append the CoD field if it is non-zero */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}
8800 
8801 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
8802 {
8803 	struct sock **sk = data;
8804 
8805 	cmd->cmd_complete(cmd, 0);
8806 
8807 	*sk = cmd->sk;
8808 	sock_hold(*sk);
8809 
8810 	mgmt_pending_remove(cmd);
8811 }
8812 
8813 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
8814 {
8815 	struct hci_dev *hdev = data;
8816 	struct mgmt_cp_unpair_device *cp = cmd->param;
8817 
8818 	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
8819 
8820 	cmd->cmd_complete(cmd, 0);
8821 	mgmt_pending_remove(cmd);
8822 }
8823 
8824 bool mgmt_powering_down(struct hci_dev *hdev)
8825 {
8826 	struct mgmt_pending_cmd *cmd;
8827 	struct mgmt_mode *cp;
8828 
8829 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8830 	if (!cmd)
8831 		return false;
8832 
8833 	cp = cmd->param;
8834 	if (!cp->val)
8835 		return true;
8836 
8837 	return false;
8838 }
8839 
/* Handle a link disconnection: if this was the last connection during a
 * mgmt-initiated power-down, expedite the power-off work. Then, if the
 * connection was known to mgmt, complete pending Disconnect commands and
 * broadcast Device Disconnected (skipping the socket that requested it).
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	if (!mgmt_connected)
		return;

	/* Only ACL and LE links are reported through mgmt */
	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	/* Reference was taken in disconnect_rsp() */
	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
8879 
/* Handle a failed HCI disconnect: flush pending Unpair Device commands
 * and, if the pending Disconnect command targets this exact address,
 * complete it with the translated HCI status.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Ignore if the pending Disconnect is for some other device */
	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
8905 
/* Report a failed outgoing connection attempt via the Connect Failed
 * event, translating the HCI status to a mgmt status. Also expedites a
 * pending power-off if this was the last tracked connection.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
8925 
8926 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
8927 {
8928 	struct mgmt_ev_pin_code_request ev;
8929 
8930 	bacpy(&ev.addr.bdaddr, bdaddr);
8931 	ev.addr.type = BDADDR_BREDR;
8932 	ev.secure = secure;
8933 
8934 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
8935 }
8936 
8937 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8938 				  u8 status)
8939 {
8940 	struct mgmt_pending_cmd *cmd;
8941 
8942 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
8943 	if (!cmd)
8944 		return;
8945 
8946 	cmd->cmd_complete(cmd, mgmt_status(status));
8947 	mgmt_pending_remove(cmd);
8948 }
8949 
8950 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8951 				      u8 status)
8952 {
8953 	struct mgmt_pending_cmd *cmd;
8954 
8955 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
8956 	if (!cmd)
8957 		return;
8958 
8959 	cmd->cmd_complete(cmd, mgmt_status(status));
8960 	mgmt_pending_remove(cmd);
8961 }
8962 
8963 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
8964 			      u8 link_type, u8 addr_type, u32 value,
8965 			      u8 confirm_hint)
8966 {
8967 	struct mgmt_ev_user_confirm_request ev;
8968 
8969 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
8970 
8971 	bacpy(&ev.addr.bdaddr, bdaddr);
8972 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
8973 	ev.confirm_hint = confirm_hint;
8974 	ev.value = cpu_to_le32(value);
8975 
8976 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
8977 			  NULL);
8978 }
8979 
8980 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
8981 			      u8 link_type, u8 addr_type)
8982 {
8983 	struct mgmt_ev_user_passkey_request ev;
8984 
8985 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
8986 
8987 	bacpy(&ev.addr.bdaddr, bdaddr);
8988 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
8989 
8990 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
8991 			  NULL);
8992 }
8993 
8994 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8995 				      u8 link_type, u8 addr_type, u8 status,
8996 				      u8 opcode)
8997 {
8998 	struct mgmt_pending_cmd *cmd;
8999 
9000 	cmd = pending_find(opcode, hdev);
9001 	if (!cmd)
9002 		return -ENOENT;
9003 
9004 	cmd->cmd_complete(cmd, mgmt_status(status));
9005 	mgmt_pending_remove(cmd);
9006 
9007 	return 0;
9008 }
9009 
/* Completion handler for User Confirm Reply. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
9016 
/* Completion handler for User Confirm Negative Reply. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
9024 
/* Completion handler for User Passkey Reply. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
9031 
/* Completion handler for User Passkey Negative Reply. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
9039 
9040 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9041 			     u8 link_type, u8 addr_type, u32 passkey,
9042 			     u8 entered)
9043 {
9044 	struct mgmt_ev_passkey_notify ev;
9045 
9046 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9047 
9048 	bacpy(&ev.addr.bdaddr, bdaddr);
9049 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9050 	ev.passkey = __cpu_to_le32(passkey);
9051 	ev.entered = entered;
9052 
9053 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9054 }
9055 
/* Report an authentication failure for @conn: broadcast the Auth Failed
 * event (skipping the socket of any pending pairing command, which is
 * completed separately with the same status).
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* The initiating socket gets the status via cmd_complete below,
	 * so exclude it from the broadcast.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
9076 
/* Completion handler for Write Authentication Enable: sync the
 * HCI_LINK_SECURITY flag with the controller state, complete pending
 * Set Link Security commands, and broadcast New Settings when the flag
 * actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* test_and_set/clear report whether the flag actually flipped */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9103 
/* Queue a Write EIR command that clears the extended inquiry response
 * data, and zero the cached copy in hdev->eir. No-op on controllers
 * without extended inquiry support.
 */
static void clear_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!lmp_ext_inq_capable(hdev))
		return;

	memset(hdev->eir, 0, sizeof(hdev->eir));

	memset(&cp, 0, sizeof(cp));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
9118 
/* Completion handler for Write Simple Pairing Mode: sync HCI_SSP_ENABLED
 * (and the dependent HCI_HS_ENABLED) flags, complete pending Set SSP
 * commands, broadcast New Settings on change, and update or clear the
 * EIR data to match the new SSP state.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Roll back an attempted enable; HS depends on SSP so it
		 * must be cleared along with it.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		/* Disabling SSP also disables HS; either flip counts as
		 * a settings change.
		 */
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		__hci_req_update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
9171 
9172 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9173 {
9174 	struct cmd_lookup *match = data;
9175 
9176 	if (match->sk == NULL) {
9177 		match->sk = cmd->sk;
9178 		sock_hold(match->sk);
9179 	}
9180 }
9181 
/* Completion handler for Write Class of Device: broadcast the changed
 * class (skipping the socket of whichever command triggered it, located
 * via sk_lookup across the three commands that can modify the class).
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
9200 
/* Completion handler for Write Local Name: cache the name if the change
 * did not originate from a mgmt Set Local Name command, and broadcast
 * Local Name Changed — except during power-on, where the write is just
 * restoring the stored name and no signal should be sent.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* Name change came from elsewhere (e.g. HCI); sync cache */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
9228 
9229 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9230 {
9231 	int i;
9232 
9233 	for (i = 0; i < uuid_count; i++) {
9234 		if (!memcmp(uuid, uuids[i], 16))
9235 			return true;
9236 	}
9237 
9238 	return false;
9239 }
9240 
/* Scan EIR/advertising data for any 16-, 32- or 128-bit service UUID
 * that matches one of @uuids. Shorter UUIDs are expanded to 128 bits via
 * the Bluetooth base UUID before comparison. Each EIR field is a length
 * byte (covering type + data) followed by a type byte and the data.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero length terminates the EIR data */
		if (field_len == 0)
			break;

		/* Stop on a field that would run past the buffer */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* Data bytes live at eir[2..field_len]; each pair
			 * is a little-endian 16-bit UUID placed into bytes
			 * 12-13 of the base UUID.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* Little-endian 32-bit UUIDs into bytes 12-15 */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* Full 128-bit UUIDs, compared verbatim */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length byte + field_len bytes) */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
9295 
/* Schedule a delayed LE scan restart so controllers with strict
 * duplicate filtering keep producing updated results. Skipped when the
 * current scan window would end before the restart would fire anyway.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
9310 
/* Apply the service-discovery result filter: RSSI threshold and UUID
 * list. Returns true when the result should be delivered to userspace.
 * May trigger an LE scan restart on strict-duplicate-filter controllers
 * so RSSI updates keep flowing.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
9355 
/* Build and send a Device Found event for a discovery/scan result,
 * applying discovery filters (kernel-initiated discovery only, service
 * filter, limited-discovery flag) and assembling the EIR payload from
 * advertising data, class of device and scan response.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK &&
		    list_empty(&hdev->pend_le_reports) &&
		    !hci_is_adv_monitoring(hdev)) {
			return;
		}
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Append the CoD as its own EIR field unless one is already there */
	if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
				       NULL))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
9440 
9441 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9442 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
9443 {
9444 	struct mgmt_ev_device_found *ev;
9445 	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
9446 	u16 eir_len;
9447 
9448 	ev = (struct mgmt_ev_device_found *) buf;
9449 
9450 	memset(buf, 0, sizeof(buf));
9451 
9452 	bacpy(&ev->addr.bdaddr, bdaddr);
9453 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
9454 	ev->rssi = rssi;
9455 
9456 	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
9457 				  name_len);
9458 
9459 	ev->eir_len = cpu_to_le16(eir_len);
9460 
9461 	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
9462 }
9463 
9464 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
9465 {
9466 	struct mgmt_ev_discovering ev;
9467 
9468 	bt_dev_dbg(hdev, "discovering %u", discovering);
9469 
9470 	memset(&ev, 0, sizeof(ev));
9471 	ev.type = hdev->discovery.type;
9472 	ev.discovering = discovering;
9473 
9474 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
9475 }
9476 
9477 void mgmt_suspending(struct hci_dev *hdev, u8 state)
9478 {
9479 	struct mgmt_ev_controller_suspend ev;
9480 
9481 	ev.suspend_state = state;
9482 	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
9483 }
9484 
9485 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
9486 		   u8 addr_type)
9487 {
9488 	struct mgmt_ev_controller_resume ev;
9489 
9490 	ev.wake_reason = reason;
9491 	if (bdaddr) {
9492 		bacpy(&ev.addr.bdaddr, bdaddr);
9493 		ev.addr.type = addr_type;
9494 	} else {
9495 		memset(&ev.addr, 0, sizeof(ev.addr));
9496 	}
9497 
9498 	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
9499 }
9500 
/* Management channel descriptor: binds the mgmt_handlers opcode table to
 * the HCI control channel and supplies mgmt_init_hdev as the per-hdev
 * init hook for the HCI socket layer.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
9507 
/* Register the management channel with the HCI socket layer, exposing the
 * mgmt_handlers command table to userspace.  Propagates the return value
 * of hci_mgmt_chan_register() (0 on success).
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
9512 
/* Unregister the management channel; counterpart of mgmt_init(). */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
9517