xref: /linux/net/bluetooth/mgmt.c (revision 550ee90ac61c1f0cd987c68a9ac6c4c9833925d7)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "smp.h"
37 #include "mgmt_util.h"
38 #include "mgmt_config.h"
39 #include "msft.h"
40 #include "eir.h"
41 #include "aosp.h"
42 
43 #define MGMT_VERSION	1
44 #define MGMT_REVISION	23
45 
/* Opcodes of all management commands supported by this interface.
 * Reported to userspace by read_commands() for trusted sockets.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
};
136 
/* Events that may be delivered to trusted management sockets.
 * Reported to userspace by read_commands().
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};
183 
/* Subset of commands available to untrusted sockets: read-only
 * operations only (see read_commands()).
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
196 
/* Subset of events that may be delivered to untrusted sockets
 * (see read_commands()).
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
211 
212 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
213 
214 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
215 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
216 
/* HCI to MGMT error code conversion table, indexed by the HCI status
 * code (see mgmt_status()). Each entry's comment names the HCI error
 * it translates.
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
284 
285 static u8 mgmt_errno_status(int err)
286 {
287 	switch (err) {
288 	case 0:
289 		return MGMT_STATUS_SUCCESS;
290 	case -EPERM:
291 		return MGMT_STATUS_REJECTED;
292 	case -EINVAL:
293 		return MGMT_STATUS_INVALID_PARAMS;
294 	case -EOPNOTSUPP:
295 		return MGMT_STATUS_NOT_SUPPORTED;
296 	case -EBUSY:
297 		return MGMT_STATUS_BUSY;
298 	case -ETIMEDOUT:
299 		return MGMT_STATUS_AUTH_FAILED;
300 	case -ENOMEM:
301 		return MGMT_STATUS_NO_RESOURCES;
302 	case -EISCONN:
303 		return MGMT_STATUS_ALREADY_CONNECTED;
304 	case -ENOTCONN:
305 		return MGMT_STATUS_DISCONNECTED;
306 	}
307 
308 	return MGMT_STATUS_FAILED;
309 }
310 
311 static u8 mgmt_status(int err)
312 {
313 	if (err < 0)
314 		return mgmt_errno_status(err);
315 
316 	if (err < ARRAY_SIZE(mgmt_status_table))
317 		return mgmt_status_table[err];
318 
319 	return MGMT_STATUS_FAILED;
320 }
321 
/* Send an index-related event on the control channel to sockets that
 * have the given flag set; no socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
328 
/* Send an event on the control channel only to sockets with the given
 * flag set, optionally skipping one socket (e.g. the originator).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
335 
/* Send an event on the control channel to all trusted sockets,
 * optionally skipping one socket.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
342 
/* Send a pre-built event skb on the control channel to all trusted
 * sockets, optionally skipping one socket.
 */
static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}
348 
349 static u8 le_addr_type(u8 mgmt_addr_type)
350 {
351 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
352 		return ADDR_LE_DEV_PUBLIC;
353 	else
354 		return ADDR_LE_DEV_RANDOM;
355 }
356 
/* Fill a mgmt_rp_read_version structure with the interface version
 * and revision (little-endian revision on the wire).
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
364 
/* Handler for MGMT_OP_READ_VERSION: reply with the management
 * interface version/revision. Not tied to any controller index.
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
377 
378 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
379 			 u16 data_len)
380 {
381 	struct mgmt_rp_read_commands *rp;
382 	u16 num_commands, num_events;
383 	size_t rp_size;
384 	int i, err;
385 
386 	bt_dev_dbg(hdev, "sock %p", sk);
387 
388 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
389 		num_commands = ARRAY_SIZE(mgmt_commands);
390 		num_events = ARRAY_SIZE(mgmt_events);
391 	} else {
392 		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
393 		num_events = ARRAY_SIZE(mgmt_untrusted_events);
394 	}
395 
396 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
397 
398 	rp = kmalloc(rp_size, GFP_KERNEL);
399 	if (!rp)
400 		return -ENOMEM;
401 
402 	rp->num_commands = cpu_to_le16(num_commands);
403 	rp->num_events = cpu_to_le16(num_events);
404 
405 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
406 		__le16 *opcode = rp->opcodes;
407 
408 		for (i = 0; i < num_commands; i++, opcode++)
409 			put_unaligned_le16(mgmt_commands[i], opcode);
410 
411 		for (i = 0; i < num_events; i++, opcode++)
412 			put_unaligned_le16(mgmt_events[i], opcode);
413 	} else {
414 		__le16 *opcode = rp->opcodes;
415 
416 		for (i = 0; i < num_commands; i++, opcode++)
417 			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
418 
419 		for (i = 0; i < num_events; i++, opcode++)
420 			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
421 	}
422 
423 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
424 				rp, rp_size);
425 	kfree(rp);
426 
427 	return err;
428 }
429 
/* Handler for MGMT_OP_READ_INDEX_LIST: reply with the indexes of all
 * configured controllers. Uses a two-pass scan under
 * hci_dev_list_lock: the first pass sizes the reply buffer, the
 * second fills it with stricter filtering, so the final count can
 * only be less than or equal to the allocation.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of configured controllers */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC because the allocation happens with the read lock held */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in indexes, skipping controllers that are
	 * still in setup/config or bound to a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the number actually added */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
487 
/* Handler for MGMT_OP_READ_UNCONF_INDEX_LIST: like read_index_list()
 * but reports only controllers with HCI_UNCONFIGURED set. Same
 * two-pass count/fill pattern under hci_dev_list_lock.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound used only to size the reply buffer */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC because the allocation happens with the read lock held */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in indexes with the additional filters, so
	 * the final count is never larger than the allocation.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
545 
/* Handler for MGMT_OP_READ_EXT_INDEX_LIST: reply with all controllers
 * (configured and unconfigured) including their bus type. As a side
 * effect, switches the calling socket over to extended index events.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* Upper bound for the allocation: every registered controller */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list)
		count++;

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		/* Skip controllers still in setup/config or bound to a
		 * user channel.
		 */
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		/* Entry type: 0x01 = unconfigured, 0x00 = configured */
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			rp->entry[count].type = 0x01;
		else
			rp->entry[count].type = 0x00;

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
611 
612 static bool is_configured(struct hci_dev *hdev)
613 {
614 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
615 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
616 		return false;
617 
618 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
619 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
620 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
621 		return false;
622 
623 	return true;
624 }
625 
626 static __le32 get_missing_options(struct hci_dev *hdev)
627 {
628 	u32 options = 0;
629 
630 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
631 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
632 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
633 
634 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
635 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
636 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
637 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
638 
639 	return cpu_to_le32(options);
640 }
641 
/* Broadcast the current missing-options bitmask to sockets that
 * subscribed to option events, skipping the given socket.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
649 
/* Complete a command with the current missing-options bitmask as the
 * reply payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
657 
658 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
659 			    void *data, u16 data_len)
660 {
661 	struct mgmt_rp_read_config_info rp;
662 	u32 options = 0;
663 
664 	bt_dev_dbg(hdev, "sock %p", sk);
665 
666 	hci_dev_lock(hdev);
667 
668 	memset(&rp, 0, sizeof(rp));
669 	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
670 
671 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
672 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
673 
674 	if (hdev->set_bdaddr)
675 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
676 
677 	rp.supported_options = cpu_to_le32(options);
678 	rp.missing_options = get_missing_options(hdev);
679 
680 	hci_dev_unlock(hdev);
681 
682 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
683 				 &rp, sizeof(rp));
684 }
685 
/* Build the bitmask of PHYs this controller supports, derived from
 * its BR/EDR LMP features and LE feature bits. Nesting mirrors the
 * feature dependencies (e.g. EDR 3M packets require EDR 2M support).
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate 1-slot packets are always available */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			/* EDR 3M rates are only possible with 2M support */
			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M is mandatory for any LE-capable controller */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
737 
/* Build the bitmask of PHYs currently selected. For BR/EDR this is
 * derived from hdev->pkt_type; note the EDR bits are tested negated
 * (a set 2DHx/3DHx bit appears to mean that packet type is excluded
 * — NOTE(review): matches the negated checks below, confirm against
 * the HCI packet-type definitions). LE selection comes from the
 * default TX/RX PHY masks.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
800 
801 static u32 get_configurable_phys(struct hci_dev *hdev)
802 {
803 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
804 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
805 }
806 
807 static u32 get_supported_settings(struct hci_dev *hdev)
808 {
809 	u32 settings = 0;
810 
811 	settings |= MGMT_SETTING_POWERED;
812 	settings |= MGMT_SETTING_BONDABLE;
813 	settings |= MGMT_SETTING_DEBUG_KEYS;
814 	settings |= MGMT_SETTING_CONNECTABLE;
815 	settings |= MGMT_SETTING_DISCOVERABLE;
816 
817 	if (lmp_bredr_capable(hdev)) {
818 		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
819 			settings |= MGMT_SETTING_FAST_CONNECTABLE;
820 		settings |= MGMT_SETTING_BREDR;
821 		settings |= MGMT_SETTING_LINK_SECURITY;
822 
823 		if (lmp_ssp_capable(hdev)) {
824 			settings |= MGMT_SETTING_SSP;
825 		}
826 
827 		if (lmp_sc_capable(hdev))
828 			settings |= MGMT_SETTING_SECURE_CONN;
829 
830 		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
831 			     &hdev->quirks))
832 			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
833 	}
834 
835 	if (lmp_le_capable(hdev)) {
836 		settings |= MGMT_SETTING_LE;
837 		settings |= MGMT_SETTING_SECURE_CONN;
838 		settings |= MGMT_SETTING_PRIVACY;
839 		settings |= MGMT_SETTING_STATIC_ADDRESS;
840 		settings |= MGMT_SETTING_ADVERTISING;
841 	}
842 
843 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
844 	    hdev->set_bdaddr)
845 		settings |= MGMT_SETTING_CONFIGURATION;
846 
847 	if (cis_central_capable(hdev))
848 		settings |= MGMT_SETTING_CIS_CENTRAL;
849 
850 	if (cis_peripheral_capable(hdev))
851 		settings |= MGMT_SETTING_CIS_PERIPHERAL;
852 
853 	settings |= MGMT_SETTING_PHY_CONFIGURATION;
854 
855 	return settings;
856 }
857 
/* Build the bitmask of currently active MGMT settings by mapping each
 * relevant hdev flag or capability to its setting bit.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_capable(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_capable(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	return settings;
}
937 
/* Look up a pending mgmt command for this controller on the control
 * channel; returns NULL if none is pending.
 */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
942 
943 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
944 {
945 	struct mgmt_pending_cmd *cmd;
946 
947 	/* If there's a pending mgmt command the flags will not yet have
948 	 * their final values, so check for this first.
949 	 */
950 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
951 	if (cmd) {
952 		struct mgmt_mode *cp = cmd->param;
953 		if (cp->val == 0x01)
954 			return LE_AD_GENERAL;
955 		else if (cp->val == 0x02)
956 			return LE_AD_LIMITED;
957 	} else {
958 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
959 			return LE_AD_LIMITED;
960 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
961 			return LE_AD_GENERAL;
962 	}
963 
964 	return 0;
965 }
966 
967 bool mgmt_get_connectable(struct hci_dev *hdev)
968 {
969 	struct mgmt_pending_cmd *cmd;
970 
971 	/* If there's a pending mgmt command the flag will not yet have
972 	 * it's final value, so check for this first.
973 	 */
974 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
975 	if (cmd) {
976 		struct mgmt_mode *cp = cmd->param;
977 
978 		return cp->val;
979 	}
980 
981 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
982 }
983 
/* hci_cmd_sync callback: refresh the EIR data and device class after
 * the service cache expires. Always returns 0.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
991 
/* Delayed-work callback for service_cache expiry: if the cache flag
 * was still set, queue the EIR/class refresh on the cmd_sync queue.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	/* test-and-clear ensures the refresh runs only once per cache period */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}
1002 
1003 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1004 {
1005 	/* The generation of a new RPA and programming it into the
1006 	 * controller happens in the hci_req_enable_advertising()
1007 	 * function.
1008 	 */
1009 	if (ext_adv_capable(hdev))
1010 		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1011 	else
1012 		return hci_enable_advertising_sync(hdev);
1013 }
1014 
/* Delayed-work callback for RPA expiry: mark the RPA as expired and,
 * if advertising is active, queue the re-advertising work that will
 * generate and program a fresh RPA.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Nothing to refresh unless advertising is currently enabled */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
1029 
1030 static int set_discoverable_sync(struct hci_dev *hdev, void *data);
1031 
/* Delayed work fired when the discoverable timeout elapses: clear the
 * discoverable flags, sync the new state to the controller and notify
 * mgmt sockets via a New Settings event.
 */
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	/* Program the cleared discoverable state into the controller */
	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}
1056 
1057 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1058 
1059 static void mesh_send_complete(struct hci_dev *hdev,
1060 			       struct mgmt_mesh_tx *mesh_tx, bool silent)
1061 {
1062 	u8 handle = mesh_tx->handle;
1063 
1064 	if (!silent)
1065 		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
1066 			   sizeof(handle), NULL);
1067 
1068 	mgmt_mesh_remove(mesh_tx);
1069 }
1070 
1071 static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
1072 {
1073 	struct mgmt_mesh_tx *mesh_tx;
1074 
1075 	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
1076 	hci_disable_advertising_sync(hdev);
1077 	mesh_tx = mgmt_mesh_next(hdev, NULL);
1078 
1079 	if (mesh_tx)
1080 		mesh_send_complete(hdev, mesh_tx, false);
1081 
1082 	return 0;
1083 }
1084 
1085 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1086 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
1087 static void mesh_next(struct hci_dev *hdev, void *data, int err)
1088 {
1089 	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);
1090 
1091 	if (!mesh_tx)
1092 		return;
1093 
1094 	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
1095 				 mesh_send_start_complete);
1096 
1097 	if (err < 0)
1098 		mesh_send_complete(hdev, mesh_tx, false);
1099 	else
1100 		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
1101 }
1102 
1103 static void mesh_send_done(struct work_struct *work)
1104 {
1105 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1106 					    mesh_send_done.work);
1107 
1108 	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
1109 		return;
1110 
1111 	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
1112 }
1113 
/* One-time per-controller mgmt setup, performed when a device that is
 * not yet under mgmt control first sees a mgmt command.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	/* Already under mgmt control; nothing to initialize */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}
1135 
/* Handle MGMT_OP_READ_INFO: reply with address, HCI version,
 * manufacturer, supported/current settings, class of device and the
 * complete and short device names. Multi-byte fields are converted to
 * little endian as mandated by the mgmt wire format.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1165 
/* Build the EIR payload for the extended controller info reply/event:
 * class of device (only while BR/EDR is enabled), appearance (only
 * while LE is enabled) and the complete and short device names.
 * Returns the number of bytes written to @eir. The caller must supply
 * a buffer large enough for all four fields.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1189 
/* Handle MGMT_OP_READ_EXT_INFO: like Read Info, but class of device
 * and names are carried in a variable-length EIR blob appended to the
 * fixed part of the reply.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1229 
/* Emit an Extended Info Changed event (fresh EIR blob included) to all
 * sockets that opted in via HCI_MGMT_EXT_INFO_EVENTS, except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1245 
1246 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1247 {
1248 	__le32 settings = cpu_to_le32(get_current_settings(hdev));
1249 
1250 	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1251 				 sizeof(settings));
1252 }
1253 
1254 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1255 {
1256 	struct mgmt_ev_advertising_added ev;
1257 
1258 	ev.instance = instance;
1259 
1260 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1261 }
1262 
1263 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1264 			      u8 instance)
1265 {
1266 	struct mgmt_ev_advertising_removed ev;
1267 
1268 	ev.instance = instance;
1269 
1270 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1271 }
1272 
1273 static void cancel_adv_timeout(struct hci_dev *hdev)
1274 {
1275 	if (hdev->adv_instance_timeout) {
1276 		hdev->adv_instance_timeout = 0;
1277 		cancel_delayed_work(&hdev->adv_instance_expire);
1278 	}
1279 }
1280 
1281 /* This function requires the caller holds hdev->lock */
1282 static void restart_le_actions(struct hci_dev *hdev)
1283 {
1284 	struct hci_conn_params *p;
1285 
1286 	list_for_each_entry(p, &hdev->le_conn_params, list) {
1287 		/* Needed for AUTO_OFF case where might not "really"
1288 		 * have been powered off.
1289 		 */
1290 		hci_pend_le_list_del_init(p);
1291 
1292 		switch (p->auto_connect) {
1293 		case HCI_AUTO_CONN_DIRECT:
1294 		case HCI_AUTO_CONN_ALWAYS:
1295 			hci_pend_le_list_add(p, &hdev->pend_le_conns);
1296 			break;
1297 		case HCI_AUTO_CONN_REPORT:
1298 			hci_pend_le_list_add(p, &hdev->pend_le_reports);
1299 			break;
1300 		default:
1301 			break;
1302 		}
1303 	}
1304 }
1305 
1306 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1307 {
1308 	__le32 ev = cpu_to_le32(get_current_settings(hdev));
1309 
1310 	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1311 				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1312 }
1313 
/* Completion handler of the Set Powered sync work: reply to the
 * pending command and, for a successful power on, restore LE actions
 * and broadcast New Settings.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}
1349 
1350 static int set_powered_sync(struct hci_dev *hdev, void *data)
1351 {
1352 	struct mgmt_pending_cmd *cmd = data;
1353 	struct mgmt_mode *cp = cmd->param;
1354 
1355 	BT_DBG("%s", hdev->name);
1356 
1357 	return hci_set_powered_sync(hdev, cp->val);
1358 }
1359 
/* Handle MGMT_OP_SET_POWERED: validate the request, reject duplicates
 * and races with an ongoing power down, then queue the state change
 * through the command sync machinery.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A power off request while the device is already powering down
	 * cannot be honoured.
	 */
	if (!cp->val) {
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
					      MGMT_STATUS_BUSY);
			goto failed;
		}
	}

	/* Only one Set Powered may be outstanding at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Requesting the current state is a no-op; just confirm it */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1418 
/* Broadcast a New Settings event to all subscribed mgmt sockets
 * (no socket is skipped).
 */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1423 
/* Context for mgmt_pending_foreach() callbacks that respond to pending
 * commands and remember the first responder's socket.
 */
struct cmd_lookup {
	struct sock *sk;	/* set by settings_rsp() with a reference held */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1429 
/* mgmt_pending_foreach() callback: answer a pending settings command
 * with the current settings, stash the first responder's socket in the
 * cmd_lookup context (taking a reference the caller must sock_put())
 * and free the pending entry.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1445 
/* mgmt_pending_foreach() callback: fail a pending command with the
 * status passed through @data, then remove the entry.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1453 
1454 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1455 {
1456 	if (cmd->cmd_complete) {
1457 		u8 *status = data;
1458 
1459 		cmd->cmd_complete(cmd, *status);
1460 		mgmt_pending_remove(cmd);
1461 
1462 		return;
1463 	}
1464 
1465 	cmd_status_rsp(cmd, data);
1466 }
1467 
/* cmd_complete handler that echoes the command's own parameters back
 * as the response payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1473 
/* cmd_complete handler that replies with only the leading part of the
 * parameters; assumes cmd->param begins with a struct mgmt_addr_info
 * (true for the address-based commands that install this handler).
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1479 
1480 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1481 {
1482 	if (!lmp_bredr_capable(hdev))
1483 		return MGMT_STATUS_NOT_SUPPORTED;
1484 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1485 		return MGMT_STATUS_REJECTED;
1486 	else
1487 		return MGMT_STATUS_SUCCESS;
1488 }
1489 
1490 static u8 mgmt_le_support(struct hci_dev *hdev)
1491 {
1492 	if (!lmp_le_capable(hdev))
1493 		return MGMT_STATUS_NOT_SUPPORTED;
1494 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1495 		return MGMT_STATUS_REJECTED;
1496 	else
1497 		return MGMT_STATUS_SUCCESS;
1498 }
1499 
/* Completion handler of the Set Discoverable sync work: report the
 * outcome to the pending command, (re)arm the discoverable timeout on
 * success and broadcast New Settings.
 */
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	/* The timeout was stored by set_discoverable(); arming it is
	 * deliberately deferred to this completion handler.
	 */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}
1533 
/* hci_cmd_sync callback: program the current discoverable state into
 * the controller.
 */
static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}
1540 
/* Handle MGMT_OP_SET_DISCOVERABLE: validate mode (0x00 off, 0x01
 * general, 0x02 limited) and timeout combinations, update the
 * discoverable flags and queue the controller update when needed.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Reject while either discoverable or connectable changes are
	 * still in flight, since the two settings interact.
	 */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled first */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Powered off: only toggle the flag, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1673 
/* Completion handler of the Set Connectable sync work: report the
 * outcome to the pending command and broadcast New Settings on
 * success.
 */
static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
					  int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}
1701 
1702 static int set_connectable_update_settings(struct hci_dev *hdev,
1703 					   struct sock *sk, u8 val)
1704 {
1705 	bool changed = false;
1706 	int err;
1707 
1708 	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1709 		changed = true;
1710 
1711 	if (val) {
1712 		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1713 	} else {
1714 		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1715 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1716 	}
1717 
1718 	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1719 	if (err < 0)
1720 		return err;
1721 
1722 	if (changed) {
1723 		hci_update_scan(hdev);
1724 		hci_update_passive_scan(hdev);
1725 		return new_settings(hdev, sk);
1726 	}
1727 
1728 	return 0;
1729 }
1730 
/* hci_cmd_sync callback: program the current connectable state into
 * the controller.
 */
static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}
1737 
/* Handle MGMT_OP_SET_CONNECTABLE: validate the request, update the
 * connectable/discoverable flags and queue the controller update.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flags-only update, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	/* Reject while either discoverable or connectable changes are
	 * still in flight, since the two settings interact.
	 */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Turning connectable off also ends discoverable mode,
		 * so disarm its timeout and clear both flags.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1797 
/* Handle MGMT_OP_SET_BONDABLE: a pure flag toggle — no HCI command is
 * needed, though a change may require refreshing the discoverable
 * state (advertising address in limited privacy mode).
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Atomic test-and-modify tells us whether the flag actually
	 * flipped, so New Settings is only sent on a real change.
	 */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1835 
/* Handle MGMT_OP_SET_LINK_SECURITY: toggle BR/EDR link-level security
 * (authentication). When powered, this sends HCI Write Auth Enable and
 * the reply is deferred until the command completes.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flags-only update, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: just confirm */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Response is sent from the command-complete path; here only
	 * the failure to even send the HCI command is handled.
	 */
	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1904 
/* Completion handler of the Set SSP sync work: reconcile the
 * HCI_SSP_ENABLED flag with the outcome, respond to all pending Set
 * SSP commands and refresh EIR data.
 */
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* Roll back the optimistic flag set done before the
		 * controller write and notify about the revert.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}
1946 
1947 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1948 {
1949 	struct mgmt_pending_cmd *cmd = data;
1950 	struct mgmt_mode *cp = cmd->param;
1951 	bool changed = false;
1952 	int err;
1953 
1954 	if (cp->val)
1955 		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1956 
1957 	err = hci_write_ssp_mode_sync(hdev, cp->val);
1958 
1959 	if (!err && changed)
1960 		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
1961 
1962 	return err;
1963 }
1964 
/* Handle MGMT_OP_SET_SSP: toggle Secure Simple Pairing. When powered,
 * the mode write is queued through the command sync machinery and the
 * reply is sent from set_ssp_complete().
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flags-only update, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just confirm */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2039 
2040 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2041 {
2042 	bt_dev_dbg(hdev, "sock %p", sk);
2043 
2044 	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2045 				       MGMT_STATUS_NOT_SUPPORTED);
2046 }
2047 
/* Completion handler of the Set LE sync work: on failure every pending
 * Set LE command is answered with a status; on success they receive
 * settings responses followed by a New Settings broadcast.
 */
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
							&status);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp() took a reference on the first responder */
	if (match.sk)
		sock_put(match.sk);
}
2068 
/* hci_cmd_sync callback for Set LE: tear down advertising state when
 * disabling, write LE host support and refresh advertising defaults
 * when LE ends up enabled.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		/* Disabling LE: drop all advertising instances and stop
		 * any active advertising first.
		 */
		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
2112 
/* Completion handler of the Set Mesh Receiver sync work: on failure
 * all pending Set Mesh Receiver commands are failed with the status;
 * on success the pending entry is removed and an empty complete is
 * sent.
 */
static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	u8 status = mgmt_status(err);
	struct sock *sk = cmd->sk;

	if (status) {
		/* cmd_status_rsp() also removes each pending entry */
		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	mgmt_pending_remove(cmd);
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
}
2128 
/* hci_cmd_sync callback for MGMT_OP_SET_MESH_RECEIVER: toggle the mesh
 * receiver flag and install the caller-supplied AD type filter list,
 * then refresh passive scanning to match.
 */
static int set_mesh_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_mesh *cp = cmd->param;
	size_t len = cmd->param_len;

	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));

	if (cp->enable)
		hci_dev_set_flag(hdev, HCI_MESH);
	else
		hci_dev_clear_flag(hdev, HCI_MESH);

	/* Bytes past the fixed header are the variable AD type filters */
	len -= sizeof(*cp);

	/* If filters don't fit, forward all adv pkts */
	if (len <= sizeof(hdev->mesh_ad_types))
		memcpy(hdev->mesh_ad_types, cp->ad_types, len);

	hci_update_passive_scan_sync(hdev);
	return 0;
}
2151 
/* MGMT_OP_SET_MESH_RECEIVER handler: validate the request and queue
 * set_mesh_sync on the cmd_sync workqueue.
 */
static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_set_mesh *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Mesh needs LE plus the experimental mesh feature switched on */
	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->enable != 0x00 && cp->enable != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
					 set_mesh_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2189 
/* Completion callback for mesh_send_sync.  On error the transmission
 * is terminated right away; on success delayed work is scheduled to
 * finish the send after the requested number of transmissions.
 */
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	unsigned long mesh_send_interval;
	u8 mgmt_err = mgmt_status(err);

	/* Report any errors here, but don't report completion */

	if (mgmt_err) {
		hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
		/* Send Complete Error Code for handle */
		mesh_send_complete(hdev, mesh_tx, false);
		return;
	}

	/* 25 ms budget per requested transmission (send->cnt) —
	 * presumably sized to cover the advertising interval; confirm
	 * against the duration computed in mesh_send_sync.
	 */
	mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
	queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
			   mesh_send_interval);
}
2210 
/* hci_cmd_sync callback for MGMT_OP_MESH_SEND: create a temporary
 * advertising instance carrying the mesh payload and schedule it.
 * The instance number used is one past the controller's configured
 * sets, so it can never collide with a regular advertising instance.
 */
static int mesh_send_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	struct adv_info *adv, *next_instance;
	u8 instance = hdev->le_num_of_adv_sets + 1;
	u16 timeout, duration;
	int err = 0;

	/* All regular advertising slots are occupied; try again later */
	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
		return MGMT_STATUS_BUSY;

	timeout = 1000;
	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
	adv = hci_add_adv_instance(hdev, instance, 0,
				   send->adv_data_len, send->adv_data,
				   0, NULL,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval,
				   mesh_tx->handle);

	if (!IS_ERR(adv))
		mesh_tx->instance = instance;
	else
		err = PTR_ERR(adv);

	if (hdev->cur_adv_instance == instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, instance);
		if (next_instance)
			instance = next_instance->instance;
		else
			instance = 0;
	} else if (hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other, or
		 * let it go naturally from queue if ADV is already happening
		 */
		instance = 0;
	}

	/* instance != 0 means an instance still needs to be scheduled */
	if (instance)
		return hci_schedule_adv_instance_sync(hdev, instance, true);

	return err;
}
2264 
2265 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2266 {
2267 	struct mgmt_rp_mesh_read_features *rp = data;
2268 
2269 	if (rp->used_handles >= rp->max_handles)
2270 		return;
2271 
2272 	rp->handles[rp->used_handles++] = mesh_tx->handle;
2273 }
2274 
/* MGMT_OP_MESH_READ_FEATURES handler: report the handle capacity and
 * the handles of the requesting socket's in-flight mesh transmissions.
 */
static int mesh_features(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_rp_mesh_read_features rp;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	memset(&rp, 0, sizeof(rp));
	rp.index = cpu_to_le16(hdev->id);
	/* With LE disabled max_handles stays 0, i.e. no sending possible */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		rp.max_handles = MESH_HANDLES_MAX;

	hci_dev_lock(hdev);

	if (rp.max_handles)
		mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	/* Reply length drops the unused tail of the handles array */
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);

	hci_dev_unlock(hdev);
	return 0;
}
2301 
/* hci_cmd_sync callback for MGMT_OP_MESH_SEND_CANCEL: cancel either a
 * single mesh transmission (by handle, only if owned by the caller's
 * socket) or, for handle 0, every transmission owned by that socket.
 */
static int send_cancel(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
	struct mgmt_mesh_tx *mesh_tx;

	if (!cancel->handle) {
		/* Handle 0: drain all of this socket's transmissions */
		do {
			mesh_tx = mgmt_mesh_next(hdev, cmd->sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, false);
		} while (mesh_tx);
	} else {
		mesh_tx = mgmt_mesh_find(hdev, cancel->handle);

		/* Only the owning socket may cancel a transmission */
		if (mesh_tx && mesh_tx->sk == cmd->sk)
			mesh_send_complete(hdev, mesh_tx, false);
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
			  0, NULL, 0);
	mgmt_pending_free(cmd);

	return 0;
}
2328 
/* MGMT_OP_MESH_SEND_CANCEL handler: validate prerequisites and queue
 * send_cancel on the cmd_sync workqueue.
 */
static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);
	/* mgmt_pending_new: not added to the pending list; send_cancel
	 * completes and frees it itself.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2362 
/* MGMT_OP_MESH_SEND handler: validate the payload, allocate a mesh
 * transmission tracking entry and, unless a send is already running,
 * kick off mesh_send_sync.  Replies with the assigned 1-byte handle.
 */
static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct mgmt_cp_mesh_send *send = data;
	struct mgmt_rp_mesh_read_features rp;
	bool sending;
	int err = 0;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_NOT_SUPPORTED);
	/* Payload must be non-empty and fit a legacy adv PDU (31 bytes) */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
	    len <= MGMT_MESH_SEND_SIZE ||
	    len > (MGMT_MESH_SEND_SIZE + 31))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* Count this socket's in-flight transmissions via send_count */
	memset(&rp, 0, sizeof(rp));
	rp.max_handles = MESH_HANDLES_MAX;

	mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	if (rp.max_handles <= rp.used_handles) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_BUSY);
		goto done;
	}

	/* If a send is already in progress, just queue the new entry;
	 * mesh_send_done processing will pick it up later.
	 */
	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
	mesh_tx = mgmt_mesh_add(sk, hdev, send, len);

	if (!mesh_tx)
		err = -ENOMEM;
	else if (!sending)
		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
					 mesh_send_start_complete);

	if (err < 0) {
		bt_dev_err(hdev, "Send Mesh Failed %d", err);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_FAILED);

		if (mesh_tx) {
			if (sending)
				mgmt_mesh_remove(mesh_tx);
		}
	} else {
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);

		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
				  &mesh_tx->handle, 1);
	}

done:
	hci_dev_unlock(hdev);
	return err;
}
2423 
/* MGMT_OP_SET_LE handler: enable or disable LE support.  Fast-paths
 * the powered-off / no-op cases directly; otherwise queues set_le_sync
 * on the cmd_sync workqueue.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Powered off, or already in the requested state: only adjust
	 * the flags; the controller is updated later at power on.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Disabling LE implicitly disables LE advertising */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Serialize against concurrent LE/advertising state changes */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2512 
2513 /* This is a helper function to test for pending mgmt commands that can
2514  * cause CoD or EIR HCI commands. We can only allow one such pending
2515  * mgmt command at a time since otherwise we cannot easily track what
2516  * the current values are, will be, and based on that calculate if a new
2517  * HCI command needs to be sent and if yes with what value.
2518  */
2519 static bool pending_eir_or_class(struct hci_dev *hdev)
2520 {
2521 	struct mgmt_pending_cmd *cmd;
2522 
2523 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2524 		switch (cmd->opcode) {
2525 		case MGMT_OP_ADD_UUID:
2526 		case MGMT_OP_REMOVE_UUID:
2527 		case MGMT_OP_SET_DEV_CLASS:
2528 		case MGMT_OP_SET_POWERED:
2529 			return true;
2530 		}
2531 	}
2532 
2533 	return false;
2534 }
2535 
/* Bluetooth Base UUID 00000000-0000-1000-8000-00805F9B34FB in
 * little-endian byte order; the 16/32-bit short value, if any, lives
 * in bytes 12-15 (see get_uuid_size).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2540 
2541 static u8 get_uuid_size(const u8 *uuid)
2542 {
2543 	u32 val;
2544 
2545 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2546 		return 128;
2547 
2548 	val = get_unaligned_le32(&uuid[12]);
2549 	if (val > 0xffff)
2550 		return 32;
2551 
2552 	return 16;
2553 }
2554 
/* Shared completion callback for the UUID/class changing commands:
 * reply with the (possibly updated) 3-byte Class of Device and free
 * the pending command.
 */
static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(err), hdev->dev_class, 3);

	mgmt_pending_free(cmd);
}
2566 
/* hci_cmd_sync callback for MGMT_OP_ADD_UUID: push the updated Class
 * of Device, then the updated EIR data, to the controller.
 */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2577 
/* MGMT_OP_ADD_UUID handler: append the UUID to hdev->uuids and submit
 * a class/EIR refresh.  Completion is reported via mgmt_class_complete.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one class/EIR affecting command may be in flight */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* MGMT_OP_ADD_UUID don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2627 
2628 static bool enable_service_cache(struct hci_dev *hdev)
2629 {
2630 	if (!hdev_is_powered(hdev))
2631 		return false;
2632 
2633 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2634 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2635 				   CACHE_TIMEOUT);
2636 		return true;
2637 	}
2638 
2639 	return false;
2640 }
2641 
/* hci_cmd_sync callback for MGMT_OP_REMOVE_UUID: refresh the Class of
 * Device first and, when that succeeds, the EIR data.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	if (!err)
		err = hci_update_eir_sync(hdev);

	return err;
}
2652 
/* MGMT_OP_REMOVE_UUID handler: remove one UUID (or all, when the
 * all-zero UUID is given) and submit a class/EIR refresh.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* Wildcard: all-zero UUID means "remove everything" */
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one class/EIR affecting command may be in flight */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache takes over, the EIR update is
		 * deferred and we can complete immediately.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_REMOVE_UUID don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2723 
/* hci_cmd_sync callback for MGMT_OP_SET_DEV_CLASS: flush a pending
 * service-cache update (which owns the deferred EIR refresh) before
 * writing the new Class of Device.
 */
static int set_class_sync(struct hci_dev *hdev, void *data)
{
	int err = 0;

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		cancel_delayed_work_sync(&hdev->service_cache);
		err = hci_update_eir_sync(hdev);
	}

	if (err)
		return err;

	return hci_update_class_sync(hdev);
}
2738 
/* MGMT_OP_SET_DEV_CLASS handler: validate and store the major/minor
 * device class and, when powered, push it to the controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one class/EIR affecting command may be in flight */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Low 2 bits of minor and high 3 bits of major are reserved */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Powered off: the stored class is applied at power on */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_SET_DEV_CLASS don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2793 
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the adapter's stored BR/EDR
 * link keys with the supplied list and update the debug-keys policy.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound that keeps struct_size() below U16_MAX */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* Request length must match the declared key count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate all key types before touching the stored keys */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Considering SMP over BREDR/LE, there is no need to check addr_type */
		if (key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2883 
/* Emit MGMT_EV_DEVICE_UNPAIRED for the given address to all mgmt
 * sockets except skip_sk (normally the socket that requested it).
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}
2895 
/* Completion callback for unpair_device_sync: broadcast the unpaired
 * event on success, then complete and free the pending command.
 */
static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	if (!err)
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	/* cmd_complete was set to addr_cmd_complete by unpair_device */
	cmd->cmd_complete(cmd, err);
	mgmt_pending_free(cmd);
}
2907 
/* hci_cmd_sync callback for MGMT_OP_UNPAIR_DEVICE: terminate the link
 * to the device, if one is still up.  No connection is not an error.
 */
static int unpair_device_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct hci_conn *conn;

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	if (!conn)
		return 0;

	return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
}
2926 
/* MGMT_OP_UNPAIR_DEVICE handler: delete the pairing data (link key for
 * BR/EDR; SMP LTK/IRK and connection parameters for LE) and optionally
 * disconnect the device.  When a disconnect is needed the reply is
 * deferred to unpair_device_complete.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* Not connected: connection parameters can go right away */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
				 unpair_device_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3055 
/* MGMT_OP_DISCONNECT handler: terminate the BR/EDR or LE connection to
 * the given address.  The reply is deferred until the disconnect
 * completes (via the pending command's cmd_complete).
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be pending at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3121 
3122 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3123 {
3124 	switch (link_type) {
3125 	case ISO_LINK:
3126 	case LE_LINK:
3127 		switch (addr_type) {
3128 		case ADDR_LE_DEV_PUBLIC:
3129 			return BDADDR_LE_PUBLIC;
3130 
3131 		default:
3132 			/* Fallback to LE Random address type */
3133 			return BDADDR_LE_RANDOM;
3134 		}
3135 
3136 	default:
3137 		/* Fallback to BR/EDR type */
3138 		return BDADDR_BREDR;
3139 	}
3140 }
3141 
/* MGMT_OP_GET_CONNECTIONS handler: reply with the addresses of all
 * mgmt-visible connections, excluding SCO/eSCO links.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count connections to size the reply allocation */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill the entries.  SCO/eSCO links are filtered
	 * by writing the slot first and then not advancing i, so the
	 * next connection simply overwrites it.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3195 
/* Helper for pin_code_reply and the neg-reply handler: queue an HCI
 * PIN Code Negative Reply for the given address, tracking it as a
 * pending mgmt command.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	/* HCI command carries only the bdaddr */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
3216 
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN code for
 * an existing BR/EDR connection to the controller.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a 16 byte PIN; send a negative reply to
	 * the controller instead of forwarding anything shorter.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3278 
3279 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3280 			     u16 len)
3281 {
3282 	struct mgmt_cp_set_io_capability *cp = data;
3283 
3284 	bt_dev_dbg(hdev, "sock %p", sk);
3285 
3286 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3287 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3288 				       MGMT_STATUS_INVALID_PARAMS);
3289 
3290 	hci_dev_lock(hdev);
3291 
3292 	hdev->io_capability = cp->io_capability;
3293 
3294 	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3295 
3296 	hci_dev_unlock(hdev);
3297 
3298 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3299 				 NULL, 0);
3300 }
3301 
3302 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3303 {
3304 	struct hci_dev *hdev = conn->hdev;
3305 	struct mgmt_pending_cmd *cmd;
3306 
3307 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3308 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3309 			continue;
3310 
3311 		if (cmd->user_data != conn)
3312 			continue;
3313 
3314 		return cmd;
3315 	}
3316 
3317 	return NULL;
3318 }
3319 
/* Complete a PAIR_DEVICE command: send the reply, detach the pairing
 * callbacks and release the references taken on the connection.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Matches the hci_conn_get() done when cmd->user_data was set */
	hci_conn_put(conn);

	return err;
}
3348 
3349 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3350 {
3351 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3352 	struct mgmt_pending_cmd *cmd;
3353 
3354 	cmd = find_pairing(conn);
3355 	if (cmd) {
3356 		cmd->cmd_complete(cmd, status);
3357 		mgmt_pending_remove(cmd);
3358 	}
3359 }
3360 
3361 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3362 {
3363 	struct mgmt_pending_cmd *cmd;
3364 
3365 	BT_DBG("status %u", status);
3366 
3367 	cmd = find_pairing(conn);
3368 	if (!cmd) {
3369 		BT_DBG("Unable to find a pending command");
3370 		return;
3371 	}
3372 
3373 	cmd->cmd_complete(cmd, mgmt_status(status));
3374 	mgmt_pending_remove(cmd);
3375 }
3376 
/* Connection-level callback used for LE pairing.  A successful
 * connection alone is not proof that pairing finished (SMP signals that
 * via mgmt_smp_complete()), so only failures are handled here.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
3395 
/* MGMT_OP_PAIR_DEVICE handler: initiate pairing with a remote BR/EDR or
 * LE device.  A connection is created (or reused) and the pairing
 * outcome is delivered through the connection callbacks.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE,
				       HCI_ACL_CONN_TIMEOUT);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the peripheral preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
		if (!p) {
			err = -EIO;
			goto unlock;
		}

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	if (IS_ERR(conn)) {
		int status;

		/* Translate the connect error into an MGMT status. */
		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* Callbacks already registered means the connection is in use by
	 * another procedure.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* Reference released by pairing_complete() */
	cmd->user_data = hci_conn_get(conn);

	/* If the link is already up and security negotiation succeeds
	 * right away, complete the command immediately.
	 */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3531 
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort an ongoing PAIR_DEVICE
 * command for the given address, remove partial pairing state and tear
 * down the link if it was created solely for pairing.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The address must match the device the pending pairing is for. */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3588 
/* Common helper for the PIN/confirm/passkey (negative) reply commands.
 * LE replies are handed to SMP directly; BR/EDR replies are forwarded
 * to the controller via the given HCI command.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses are handled by SMP, not the controller. */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3659 
/* MGMT_OP_PIN_CODE_NEG_REPLY handler. */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3671 
/* MGMT_OP_USER_CONFIRM_REPLY handler. */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Reject wrongly sized commands. */
	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3687 
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler. */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3699 
/* MGMT_OP_USER_PASSKEY_REPLY handler. */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3711 
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler. */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3723 
/* Expire the current advertising instance if it advertises any of the
 * given @flags: cancel its timeout and schedule the next instance so
 * refreshed data gets advertised.
 */
static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return 0;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return 0;

	cancel_adv_timeout(hdev);

	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return 0;

	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);

	return 0;
}
3746 
/* hci_cmd_sync callback: expire advertising that carries the local name. */
static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}
3751 
/* Completion callback for MGMT_OP_SET_LOCAL_NAME: report the result to
 * the issuing socket and, if LE advertising is active, refresh
 * advertising instances that carry the local name.
 */
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Only proceed if cmd is still the pending SET_LOCAL_NAME command. */
	if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}
3776 
/* hci_cmd_sync work: push the updated name to the controller (BR/EDR
 * name and EIR) and refresh LE scan response data when advertising.
 */
static int set_name_sync(struct hci_dev *hdev, void *data)
{
	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}
3792 
/* MGMT_OP_SET_LOCAL_NAME handler: update the device name and short
 * name, propagating the change to the controller when powered.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* While powered off only the cached names are updated and the
	 * change is announced to other mgmt sockets right away.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	/* Only cache the new name once the update was queued successfully. */
	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3855 
/* hci_cmd_sync callback: expire advertising that carries the appearance. */
static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}
3860 
/* MGMT_OP_SET_APPEARANCE handler: store the GAP appearance value and,
 * when it changed while advertising, refresh advertising data that
 * includes it.
 */
static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Appearance is an LE-only property. */
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	appearance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != appearance) {
		hdev->appearance = appearance;

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
					   NULL);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}
3895 
3896 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3897 				 void *data, u16 len)
3898 {
3899 	struct mgmt_rp_get_phy_configuration rp;
3900 
3901 	bt_dev_dbg(hdev, "sock %p", sk);
3902 
3903 	hci_dev_lock(hdev);
3904 
3905 	memset(&rp, 0, sizeof(rp));
3906 
3907 	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3908 	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3909 	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3910 
3911 	hci_dev_unlock(hdev);
3912 
3913 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3914 				 &rp, sizeof(rp));
3915 }
3916 
3917 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3918 {
3919 	struct mgmt_ev_phy_configuration_changed ev;
3920 
3921 	memset(&ev, 0, sizeof(ev));
3922 
3923 	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3924 
3925 	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3926 			  sizeof(ev), skip);
3927 }
3928 
/* Completion handler for set_default_phy_sync(): derive the final
 * status from the HCI response skb and report it back, emitting a
 * PHY_CONFIGURATION_CHANGED event on success.
 */
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Only proceed if cmd is still the pending command. */
	if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	/* If queueing succeeded, the real outcome is in the skb: either
	 * an error pointer or the HCI status byte.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}
3965 
3966 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
3967 {
3968 	struct mgmt_pending_cmd *cmd = data;
3969 	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
3970 	struct hci_cp_le_set_default_phy cp_phy;
3971 	u32 selected_phys = __le32_to_cpu(cp->selected_phys);
3972 
3973 	memset(&cp_phy, 0, sizeof(cp_phy));
3974 
3975 	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3976 		cp_phy.all_phys |= 0x01;
3977 
3978 	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3979 		cp_phy.all_phys |= 0x02;
3980 
3981 	if (selected_phys & MGMT_PHY_LE_1M_TX)
3982 		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3983 
3984 	if (selected_phys & MGMT_PHY_LE_2M_TX)
3985 		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3986 
3987 	if (selected_phys & MGMT_PHY_LE_CODED_TX)
3988 		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3989 
3990 	if (selected_phys & MGMT_PHY_LE_1M_RX)
3991 		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3992 
3993 	if (selected_phys & MGMT_PHY_LE_2M_RX)
3994 		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3995 
3996 	if (selected_phys & MGMT_PHY_LE_CODED_RX)
3997 		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3998 
3999 	cmd->skb =  __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4000 				   sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4001 
4002 	return 0;
4003 }
4004 
/* MGMT_OP_SET_PHY_CONFIGURATION handler: apply the selected BR/EDR
 * packet types immediately and queue an HCI command for the LE default
 * PHYs when those changed as well.
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Selecting an unsupported PHY is invalid. */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* PHYs that cannot be configured must always remain selected. */
	unconfigure_phys = supported_phys & ~configurable_phys;

	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Map the BR/EDR PHY selection onto ACL packet-type bits.  Note
	 * the EDR 2M/3M bits below are cleared when the PHY is selected
	 * (inverted relative to the basic-rate bits).
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If the LE part of the selection is unchanged, only the BR/EDR
	 * packet types needed updating and we can complete right away.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4133 
/* MGMT_OP_SET_BLOCKED_KEYS handler: replace the device's list of
 * blocked keys with the one supplied by userspace.
 */
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	/* Note: err carries an MGMT_STATUS_* code here, not a negative
	 * errno value.
	 */
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must exactly match the declared key count. */
	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* The new list fully replaces any previously blocked keys. */
	hci_blocked_keys_clear(hdev);

	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b) {
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				err, NULL, 0);
}
4182 
/* MGMT_OP_SET_WIDEBAND_SPEECH handler: toggle the wideband speech
 * setting; the value may only be changed while the controller is
 * powered off.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Reject a change of the setting while powered. */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4231 
/* MGMT_OP_READ_CONTROLLER_CAP handler: build a TLV list describing
 * security-related controller capabilities.
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	/* Fixed buffer holding the reply header plus every TLV appended
	 * below (security flags, the two key-size entries and the LE tx
	 * power range).
	 */
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
4298 
/* Experimental feature UUIDs.
 *
 * Each array stores the UUID quoted in the comment above it in reversed
 * (little-endian) byte order — the layout carried on the mgmt wire and
 * matched byte-for-byte by the memcmp() lookup in set_exp_feature().
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};

/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
static const u8 mgmt_mesh_uuid[16] = {
	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
};
4342 
4343 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4344 				  void *data, u16 data_len)
4345 {
4346 	struct mgmt_rp_read_exp_features_info *rp;
4347 	size_t len;
4348 	u16 idx = 0;
4349 	u32 flags;
4350 	int status;
4351 
4352 	bt_dev_dbg(hdev, "sock %p", sk);
4353 
4354 	/* Enough space for 7 features */
4355 	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4356 	rp = kzalloc(len, GFP_KERNEL);
4357 	if (!rp)
4358 		return -ENOMEM;
4359 
4360 #ifdef CONFIG_BT_FEATURE_DEBUG
4361 	if (!hdev) {
4362 		flags = bt_dbg_get() ? BIT(0) : 0;
4363 
4364 		memcpy(rp->features[idx].uuid, debug_uuid, 16);
4365 		rp->features[idx].flags = cpu_to_le32(flags);
4366 		idx++;
4367 	}
4368 #endif
4369 
4370 	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4371 		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4372 			flags = BIT(0);
4373 		else
4374 			flags = 0;
4375 
4376 		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4377 		rp->features[idx].flags = cpu_to_le32(flags);
4378 		idx++;
4379 	}
4380 
4381 	if (hdev && ll_privacy_capable(hdev)) {
4382 		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
4383 			flags = BIT(0) | BIT(1);
4384 		else
4385 			flags = BIT(1);
4386 
4387 		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4388 		rp->features[idx].flags = cpu_to_le32(flags);
4389 		idx++;
4390 	}
4391 
4392 	if (hdev && (aosp_has_quality_report(hdev) ||
4393 		     hdev->set_quality_report)) {
4394 		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4395 			flags = BIT(0);
4396 		else
4397 			flags = 0;
4398 
4399 		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4400 		rp->features[idx].flags = cpu_to_le32(flags);
4401 		idx++;
4402 	}
4403 
4404 	if (hdev && hdev->get_data_path_id) {
4405 		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4406 			flags = BIT(0);
4407 		else
4408 			flags = 0;
4409 
4410 		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4411 		rp->features[idx].flags = cpu_to_le32(flags);
4412 		idx++;
4413 	}
4414 
4415 	if (IS_ENABLED(CONFIG_BT_LE)) {
4416 		flags = iso_enabled() ? BIT(0) : 0;
4417 		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4418 		rp->features[idx].flags = cpu_to_le32(flags);
4419 		idx++;
4420 	}
4421 
4422 	if (hdev && lmp_le_capable(hdev)) {
4423 		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4424 			flags = BIT(0);
4425 		else
4426 			flags = 0;
4427 
4428 		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4429 		rp->features[idx].flags = cpu_to_le32(flags);
4430 		idx++;
4431 	}
4432 
4433 	rp->feature_count = cpu_to_le16(idx);
4434 
4435 	/* After reading the experimental features information, enable
4436 	 * the events to update client on any future change.
4437 	 */
4438 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4439 
4440 	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4441 				   MGMT_OP_READ_EXP_FEATURES_INFO,
4442 				   0, rp, sizeof(*rp) + (20 * idx));
4443 
4444 	kfree(rp);
4445 	return status;
4446 }
4447 
4448 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4449 					  struct sock *skip)
4450 {
4451 	struct mgmt_ev_exp_feature_changed ev;
4452 
4453 	memset(&ev, 0, sizeof(ev));
4454 	memcpy(ev.uuid, rpa_resolution_uuid, 16);
4455 	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4456 
4457 	// Do we need to be atomic with the conn_flags?
4458 	if (enabled && privacy_mode_capable(hdev))
4459 		hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4460 	else
4461 		hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
4462 
4463 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4464 				  &ev, sizeof(ev),
4465 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4466 
4467 }
4468 
4469 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4470 			       bool enabled, struct sock *skip)
4471 {
4472 	struct mgmt_ev_exp_feature_changed ev;
4473 
4474 	memset(&ev, 0, sizeof(ev));
4475 	memcpy(ev.uuid, uuid, 16);
4476 	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4477 
4478 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4479 				  &ev, sizeof(ev),
4480 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4481 }
4482 
/* Build one entry of the exp_features[] dispatch table: maps a feature
 * UUID to its Set Experimental Feature handler.
 */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
4488 
4489 /* The zero key uuid is special. Multiple exp features are set through it. */
4490 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4491 			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4492 {
4493 	struct mgmt_rp_set_exp_feature rp;
4494 
4495 	memset(rp.uuid, 0, 16);
4496 	rp.flags = cpu_to_le32(0);
4497 
4498 #ifdef CONFIG_BT_FEATURE_DEBUG
4499 	if (!hdev) {
4500 		bool changed = bt_dbg_get();
4501 
4502 		bt_dbg_set(false);
4503 
4504 		if (changed)
4505 			exp_feature_changed(NULL, ZERO_KEY, false, sk);
4506 	}
4507 #endif
4508 
4509 	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
4510 		bool changed;
4511 
4512 		changed = hci_dev_test_and_clear_flag(hdev,
4513 						      HCI_ENABLE_LL_PRIVACY);
4514 		if (changed)
4515 			exp_feature_changed(hdev, rpa_resolution_uuid, false,
4516 					    sk);
4517 	}
4518 
4519 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4520 
4521 	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4522 				 MGMT_OP_SET_EXP_FEATURE, 0,
4523 				 &rp, sizeof(rp));
4524 }
4525 
4526 #ifdef CONFIG_BT_FEATURE_DEBUG
4527 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4528 			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4529 {
4530 	struct mgmt_rp_set_exp_feature rp;
4531 
4532 	bool val, changed;
4533 	int err;
4534 
4535 	/* Command requires to use the non-controller index */
4536 	if (hdev)
4537 		return mgmt_cmd_status(sk, hdev->id,
4538 				       MGMT_OP_SET_EXP_FEATURE,
4539 				       MGMT_STATUS_INVALID_INDEX);
4540 
4541 	/* Parameters are limited to a single octet */
4542 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4543 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4544 				       MGMT_OP_SET_EXP_FEATURE,
4545 				       MGMT_STATUS_INVALID_PARAMS);
4546 
4547 	/* Only boolean on/off is supported */
4548 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4549 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4550 				       MGMT_OP_SET_EXP_FEATURE,
4551 				       MGMT_STATUS_INVALID_PARAMS);
4552 
4553 	val = !!cp->param[0];
4554 	changed = val ? !bt_dbg_get() : bt_dbg_get();
4555 	bt_dbg_set(val);
4556 
4557 	memcpy(rp.uuid, debug_uuid, 16);
4558 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4559 
4560 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4561 
4562 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4563 				MGMT_OP_SET_EXP_FEATURE, 0,
4564 				&rp, sizeof(rp));
4565 
4566 	if (changed)
4567 		exp_feature_changed(hdev, debug_uuid, val, sk);
4568 
4569 	return err;
4570 }
4571 #endif
4572 
4573 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4574 			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4575 {
4576 	struct mgmt_rp_set_exp_feature rp;
4577 	bool val, changed;
4578 	int err;
4579 
4580 	/* Command requires to use the controller index */
4581 	if (!hdev)
4582 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4583 				       MGMT_OP_SET_EXP_FEATURE,
4584 				       MGMT_STATUS_INVALID_INDEX);
4585 
4586 	/* Parameters are limited to a single octet */
4587 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4588 		return mgmt_cmd_status(sk, hdev->id,
4589 				       MGMT_OP_SET_EXP_FEATURE,
4590 				       MGMT_STATUS_INVALID_PARAMS);
4591 
4592 	/* Only boolean on/off is supported */
4593 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4594 		return mgmt_cmd_status(sk, hdev->id,
4595 				       MGMT_OP_SET_EXP_FEATURE,
4596 				       MGMT_STATUS_INVALID_PARAMS);
4597 
4598 	val = !!cp->param[0];
4599 
4600 	if (val) {
4601 		changed = !hci_dev_test_and_set_flag(hdev,
4602 						     HCI_MESH_EXPERIMENTAL);
4603 	} else {
4604 		hci_dev_clear_flag(hdev, HCI_MESH);
4605 		changed = hci_dev_test_and_clear_flag(hdev,
4606 						      HCI_MESH_EXPERIMENTAL);
4607 	}
4608 
4609 	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4610 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4611 
4612 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4613 
4614 	err = mgmt_cmd_complete(sk, hdev->id,
4615 				MGMT_OP_SET_EXP_FEATURE, 0,
4616 				&rp, sizeof(rp));
4617 
4618 	if (changed)
4619 		exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4620 
4621 	return err;
4622 }
4623 
4624 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4625 				   struct mgmt_cp_set_exp_feature *cp,
4626 				   u16 data_len)
4627 {
4628 	struct mgmt_rp_set_exp_feature rp;
4629 	bool val, changed;
4630 	int err;
4631 	u32 flags;
4632 
4633 	/* Command requires to use the controller index */
4634 	if (!hdev)
4635 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4636 				       MGMT_OP_SET_EXP_FEATURE,
4637 				       MGMT_STATUS_INVALID_INDEX);
4638 
4639 	/* Changes can only be made when controller is powered down */
4640 	if (hdev_is_powered(hdev))
4641 		return mgmt_cmd_status(sk, hdev->id,
4642 				       MGMT_OP_SET_EXP_FEATURE,
4643 				       MGMT_STATUS_REJECTED);
4644 
4645 	/* Parameters are limited to a single octet */
4646 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4647 		return mgmt_cmd_status(sk, hdev->id,
4648 				       MGMT_OP_SET_EXP_FEATURE,
4649 				       MGMT_STATUS_INVALID_PARAMS);
4650 
4651 	/* Only boolean on/off is supported */
4652 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4653 		return mgmt_cmd_status(sk, hdev->id,
4654 				       MGMT_OP_SET_EXP_FEATURE,
4655 				       MGMT_STATUS_INVALID_PARAMS);
4656 
4657 	val = !!cp->param[0];
4658 
4659 	if (val) {
4660 		changed = !hci_dev_test_and_set_flag(hdev,
4661 						     HCI_ENABLE_LL_PRIVACY);
4662 		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4663 
4664 		/* Enable LL privacy + supported settings changed */
4665 		flags = BIT(0) | BIT(1);
4666 	} else {
4667 		changed = hci_dev_test_and_clear_flag(hdev,
4668 						      HCI_ENABLE_LL_PRIVACY);
4669 
4670 		/* Disable LL privacy + supported settings changed */
4671 		flags = BIT(1);
4672 	}
4673 
4674 	memcpy(rp.uuid, rpa_resolution_uuid, 16);
4675 	rp.flags = cpu_to_le32(flags);
4676 
4677 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4678 
4679 	err = mgmt_cmd_complete(sk, hdev->id,
4680 				MGMT_OP_SET_EXP_FEATURE, 0,
4681 				&rp, sizeof(rp));
4682 
4683 	if (changed)
4684 		exp_ll_privacy_feature_changed(val, hdev, sk);
4685 
4686 	return err;
4687 }
4688 
/* Set Experimental Feature handler for the quality report UUID.
 *
 * Toggles quality report generation either via the driver callback
 * (hdev->set_quality_report) or via the AOSP vendor extension.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Serialize against other synchronous HCI operations */
	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	/* Either a driver hook or the AOSP extension must be available */
	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		/* The driver hook takes precedence over AOSP */
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		/* Only record the new state once the toggle succeeded */
		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4762 
4763 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4764 				  struct mgmt_cp_set_exp_feature *cp,
4765 				  u16 data_len)
4766 {
4767 	bool val, changed;
4768 	int err;
4769 	struct mgmt_rp_set_exp_feature rp;
4770 
4771 	/* Command requires to use a valid controller index */
4772 	if (!hdev)
4773 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4774 				       MGMT_OP_SET_EXP_FEATURE,
4775 				       MGMT_STATUS_INVALID_INDEX);
4776 
4777 	/* Parameters are limited to a single octet */
4778 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4779 		return mgmt_cmd_status(sk, hdev->id,
4780 				       MGMT_OP_SET_EXP_FEATURE,
4781 				       MGMT_STATUS_INVALID_PARAMS);
4782 
4783 	/* Only boolean on/off is supported */
4784 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4785 		return mgmt_cmd_status(sk, hdev->id,
4786 				       MGMT_OP_SET_EXP_FEATURE,
4787 				       MGMT_STATUS_INVALID_PARAMS);
4788 
4789 	val = !!cp->param[0];
4790 	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4791 
4792 	if (!hdev->get_data_path_id) {
4793 		return mgmt_cmd_status(sk, hdev->id,
4794 				       MGMT_OP_SET_EXP_FEATURE,
4795 				       MGMT_STATUS_NOT_SUPPORTED);
4796 	}
4797 
4798 	if (changed) {
4799 		if (val)
4800 			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4801 		else
4802 			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4803 	}
4804 
4805 	bt_dev_info(hdev, "offload codecs enable %d changed %d",
4806 		    val, changed);
4807 
4808 	memcpy(rp.uuid, offload_codecs_uuid, 16);
4809 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4810 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4811 	err = mgmt_cmd_complete(sk, hdev->id,
4812 				MGMT_OP_SET_EXP_FEATURE, 0,
4813 				&rp, sizeof(rp));
4814 
4815 	if (changed)
4816 		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4817 
4818 	return err;
4819 }
4820 
4821 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4822 					  struct mgmt_cp_set_exp_feature *cp,
4823 					  u16 data_len)
4824 {
4825 	bool val, changed;
4826 	int err;
4827 	struct mgmt_rp_set_exp_feature rp;
4828 
4829 	/* Command requires to use a valid controller index */
4830 	if (!hdev)
4831 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4832 				       MGMT_OP_SET_EXP_FEATURE,
4833 				       MGMT_STATUS_INVALID_INDEX);
4834 
4835 	/* Parameters are limited to a single octet */
4836 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4837 		return mgmt_cmd_status(sk, hdev->id,
4838 				       MGMT_OP_SET_EXP_FEATURE,
4839 				       MGMT_STATUS_INVALID_PARAMS);
4840 
4841 	/* Only boolean on/off is supported */
4842 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4843 		return mgmt_cmd_status(sk, hdev->id,
4844 				       MGMT_OP_SET_EXP_FEATURE,
4845 				       MGMT_STATUS_INVALID_PARAMS);
4846 
4847 	val = !!cp->param[0];
4848 	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4849 
4850 	if (!hci_dev_le_state_simultaneous(hdev)) {
4851 		return mgmt_cmd_status(sk, hdev->id,
4852 				       MGMT_OP_SET_EXP_FEATURE,
4853 				       MGMT_STATUS_NOT_SUPPORTED);
4854 	}
4855 
4856 	if (changed) {
4857 		if (val)
4858 			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4859 		else
4860 			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4861 	}
4862 
4863 	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4864 		    val, changed);
4865 
4866 	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4867 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4868 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4869 	err = mgmt_cmd_complete(sk, hdev->id,
4870 				MGMT_OP_SET_EXP_FEATURE, 0,
4871 				&rp, sizeof(rp));
4872 
4873 	if (changed)
4874 		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4875 
4876 	return err;
4877 }
4878 
4879 #ifdef CONFIG_BT_LE
4880 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4881 			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4882 {
4883 	struct mgmt_rp_set_exp_feature rp;
4884 	bool val, changed = false;
4885 	int err;
4886 
4887 	/* Command requires to use the non-controller index */
4888 	if (hdev)
4889 		return mgmt_cmd_status(sk, hdev->id,
4890 				       MGMT_OP_SET_EXP_FEATURE,
4891 				       MGMT_STATUS_INVALID_INDEX);
4892 
4893 	/* Parameters are limited to a single octet */
4894 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4895 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4896 				       MGMT_OP_SET_EXP_FEATURE,
4897 				       MGMT_STATUS_INVALID_PARAMS);
4898 
4899 	/* Only boolean on/off is supported */
4900 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4901 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4902 				       MGMT_OP_SET_EXP_FEATURE,
4903 				       MGMT_STATUS_INVALID_PARAMS);
4904 
4905 	val = cp->param[0] ? true : false;
4906 	if (val)
4907 		err = iso_init();
4908 	else
4909 		err = iso_exit();
4910 
4911 	if (!err)
4912 		changed = true;
4913 
4914 	memcpy(rp.uuid, iso_socket_uuid, 16);
4915 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4916 
4917 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4918 
4919 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4920 				MGMT_OP_SET_EXP_FEATURE, 0,
4921 				&rp, sizeof(rp));
4922 
4923 	if (changed)
4924 		exp_feature_changed(hdev, iso_socket_uuid, val, sk);
4925 
4926 	return err;
4927 }
4928 #endif
4929 
/* Dispatch table mapping each experimental feature UUID to its Set
 * Experimental Feature handler; terminated by a NULL entry (see the
 * lookup loop in set_exp_feature()).
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
#ifdef CONFIG_BT_LE
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
#endif

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
4951 
4952 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4953 			   void *data, u16 data_len)
4954 {
4955 	struct mgmt_cp_set_exp_feature *cp = data;
4956 	size_t i = 0;
4957 
4958 	bt_dev_dbg(hdev, "sock %p", sk);
4959 
4960 	for (i = 0; exp_features[i].uuid; i++) {
4961 		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4962 			return exp_features[i].set_func(sk, hdev, cp, data_len);
4963 	}
4964 
4965 	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4966 			       MGMT_OP_SET_EXP_FEATURE,
4967 			       MGMT_STATUS_NOT_SUPPORTED);
4968 }
4969 
/* Return the connection flags @hdev actually supports for @params;
 * may be a subset of hdev->conn_flags.
 */
static u32 get_params_flags(struct hci_dev *hdev,
			    struct hci_conn_params *params)
{
	u32 flags = hdev->conn_flags;

	/* Devices using RPAs can only be programmed in the acceptlist if
	 * LL Privacy has been enable otherwise they cannot mark
	 * HCI_CONN_FLAG_REMOTE_WAKEUP.
	 */
	if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
		flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;

	return flags;
}
4985 
4986 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4987 			    u16 data_len)
4988 {
4989 	struct mgmt_cp_get_device_flags *cp = data;
4990 	struct mgmt_rp_get_device_flags rp;
4991 	struct bdaddr_list_with_flags *br_params;
4992 	struct hci_conn_params *params;
4993 	u32 supported_flags;
4994 	u32 current_flags = 0;
4995 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4996 
4997 	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4998 		   &cp->addr.bdaddr, cp->addr.type);
4999 
5000 	hci_dev_lock(hdev);
5001 
5002 	supported_flags = hdev->conn_flags;
5003 
5004 	memset(&rp, 0, sizeof(rp));
5005 
5006 	if (cp->addr.type == BDADDR_BREDR) {
5007 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5008 							      &cp->addr.bdaddr,
5009 							      cp->addr.type);
5010 		if (!br_params)
5011 			goto done;
5012 
5013 		current_flags = br_params->flags;
5014 	} else {
5015 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5016 						le_addr_type(cp->addr.type));
5017 		if (!params)
5018 			goto done;
5019 
5020 		supported_flags = get_params_flags(hdev, params);
5021 		current_flags = params->flags;
5022 	}
5023 
5024 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5025 	rp.addr.type = cp->addr.type;
5026 	rp.supported_flags = cpu_to_le32(supported_flags);
5027 	rp.current_flags = cpu_to_le32(current_flags);
5028 
5029 	status = MGMT_STATUS_SUCCESS;
5030 
5031 done:
5032 	hci_dev_unlock(hdev);
5033 
5034 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5035 				&rp, sizeof(rp));
5036 }
5037 
5038 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5039 				 bdaddr_t *bdaddr, u8 bdaddr_type,
5040 				 u32 supported_flags, u32 current_flags)
5041 {
5042 	struct mgmt_ev_device_flags_changed ev;
5043 
5044 	bacpy(&ev.addr.bdaddr, bdaddr);
5045 	ev.addr.type = bdaddr_type;
5046 	ev.supported_flags = cpu_to_le32(supported_flags);
5047 	ev.current_flags = cpu_to_le32(current_flags);
5048 
5049 	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5050 }
5051 
/* Set Device Flags (MGMT_OP_SET_DEVICE_FLAGS).
 *
 * Validates the requested flags against what the controller (and, for
 * LE, the specific connection parameters) support, stores them on the
 * matching accept-list entry (BR/EDR) or conn_params entry (LE), and
 * emits Device Flags Changed on success.
 */
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags;
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	/* NOTE(review): conn_flags is read before hci_dev_lock() is taken
	 * below and could change concurrently; consider taking the lock
	 * earlier — TODO confirm whether this race matters in practice.
	 */
	supported_flags = hdev->conn_flags;

	/* Reject any flag bit the controller does not support */
	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto done;
	}

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}

		goto unlock;
	}

	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					le_addr_type(cp->addr.type));
	if (!params) {
		bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
			    &cp->addr.bdaddr, le_addr_type(cp->addr.type));
		goto unlock;
	}

	/* LE params may support fewer flags than the controller as a
	 * whole (see get_params_flags()), so re-validate against those.
	 */
	supported_flags = get_params_flags(hdev, params);

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto unlock;
	}

	WRITE_ONCE(params->flags, current_flags);
	status = MGMT_STATUS_SUCCESS;

	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
	 * has been set.
	 */
	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
		hci_update_passive_scan(hdev);

unlock:
	hci_dev_unlock(hdev);

done:
	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}
5128 
5129 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5130 				   u16 handle)
5131 {
5132 	struct mgmt_ev_adv_monitor_added ev;
5133 
5134 	ev.monitor_handle = cpu_to_le16(handle);
5135 
5136 	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5137 }
5138 
5139 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
5140 {
5141 	struct mgmt_ev_adv_monitor_removed ev;
5142 	struct mgmt_pending_cmd *cmd;
5143 	struct sock *sk_skip = NULL;
5144 	struct mgmt_cp_remove_adv_monitor *cp;
5145 
5146 	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
5147 	if (cmd) {
5148 		cp = cmd->param;
5149 
5150 		if (cp->monitor_handle)
5151 			sk_skip = cmd->sk;
5152 	}
5153 
5154 	ev.monitor_handle = cpu_to_le16(handle);
5155 
5156 	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
5157 }
5158 
/* Read Adv Monitor Features (MGMT_OP_READ_ADV_MONITOR_FEATURES).
 *
 * Reports the supported/enabled monitor features together with the
 * handles of all currently registered advertisement monitors.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	/* NOTE(review): assumes the idr never holds more than
	 * HCI_MAX_ADV_MONITOR_NUM_HANDLES entries (presumably enforced
	 * where monitors are added) — TODO confirm, otherwise handles[]
	 * could overflow.
	 */
	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	/* Reply is variable length: fixed header plus one u16 per handle */
	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
5207 
/* Completion callback for the Add Adv Patterns Monitor sync request.
 *
 * On success the monitor is marked registered: the Adv Monitor Added
 * event is emitted, the monitor count bumped, and passive scanning is
 * re-evaluated.  In all cases the pending mgmt command is answered with
 * the monitor handle and removed.
 */
static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	hci_dev_lock(hdev);

	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_passive_scan(hdev);
	}

	/* Answer with the (possibly error) status and the handle */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5235 
5236 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5237 {
5238 	struct mgmt_pending_cmd *cmd = data;
5239 	struct adv_monitor *monitor = cmd->user_data;
5240 
5241 	return hci_add_adv_monitor(hdev, monitor);
5242 }
5243 
/* Common tail for the Add Adv Patterns Monitor command variants.
 *
 * @m:      monitor built by the caller; this function takes ownership —
 *          it is freed via hci_free_adv_monitor() on every error path
 *          and handed to the sync machinery on success.
 * @status: non-zero when the caller already failed parsing; turned
 *          directly into the command status reply.
 * @op:     the exact mgmt opcode being served (with or without RSSI).
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	hci_dev_lock(hdev);

	if (status)
		goto unlock;

	/* Only one monitor- or LE-state-changing operation at a time */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
				 mgmt_add_adv_patterns_monitor_complete);
	if (err) {
		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	/* Error path: the monitor was not handed over, release it here */
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
5291 
5292 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5293 				   struct mgmt_adv_rssi_thresholds *rssi)
5294 {
5295 	if (rssi) {
5296 		m->rssi.low_threshold = rssi->low_threshold;
5297 		m->rssi.low_threshold_timeout =
5298 		    __le16_to_cpu(rssi->low_threshold_timeout);
5299 		m->rssi.high_threshold = rssi->high_threshold;
5300 		m->rssi.high_threshold_timeout =
5301 		    __le16_to_cpu(rssi->high_threshold_timeout);
5302 		m->rssi.sampling_period = rssi->sampling_period;
5303 	} else {
5304 		/* Default values. These numbers are the least constricting
5305 		 * parameters for MSFT API to work, so it behaves as if there
5306 		 * are no rssi parameter to consider. May need to be changed
5307 		 * if other API are to be supported.
5308 		 */
5309 		m->rssi.low_threshold = -127;
5310 		m->rssi.low_threshold_timeout = 60;
5311 		m->rssi.high_threshold = -127;
5312 		m->rssi.high_threshold_timeout = 0;
5313 		m->rssi.sampling_period = 0;
5314 	}
5315 }
5316 
5317 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5318 				    struct mgmt_adv_pattern *patterns)
5319 {
5320 	u8 offset = 0, length = 0;
5321 	struct adv_pattern *p = NULL;
5322 	int i;
5323 
5324 	for (i = 0; i < pattern_count; i++) {
5325 		offset = patterns[i].offset;
5326 		length = patterns[i].length;
5327 		if (offset >= HCI_MAX_EXT_AD_LENGTH ||
5328 		    length > HCI_MAX_EXT_AD_LENGTH ||
5329 		    (offset + length) > HCI_MAX_EXT_AD_LENGTH)
5330 			return MGMT_STATUS_INVALID_PARAMS;
5331 
5332 		p = kmalloc(sizeof(*p), GFP_KERNEL);
5333 		if (!p)
5334 			return MGMT_STATUS_NO_RESOURCES;
5335 
5336 		p->ad_type = patterns[i].ad_type;
5337 		p->offset = patterns[i].offset;
5338 		p->length = patterns[i].length;
5339 		memcpy(p->value, patterns[i].value, p->length);
5340 
5341 		INIT_LIST_HEAD(&p->list);
5342 		list_add(&p->list, &m->patterns);
5343 	}
5344 
5345 	return MGMT_STATUS_SUCCESS;
5346 }
5347 
5348 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5349 				    void *data, u16 len)
5350 {
5351 	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5352 	struct adv_monitor *m = NULL;
5353 	u8 status = MGMT_STATUS_SUCCESS;
5354 	size_t expected_size = sizeof(*cp);
5355 
5356 	BT_DBG("request for %s", hdev->name);
5357 
5358 	if (len <= sizeof(*cp)) {
5359 		status = MGMT_STATUS_INVALID_PARAMS;
5360 		goto done;
5361 	}
5362 
5363 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5364 	if (len != expected_size) {
5365 		status = MGMT_STATUS_INVALID_PARAMS;
5366 		goto done;
5367 	}
5368 
5369 	m = kzalloc(sizeof(*m), GFP_KERNEL);
5370 	if (!m) {
5371 		status = MGMT_STATUS_NO_RESOURCES;
5372 		goto done;
5373 	}
5374 
5375 	INIT_LIST_HEAD(&m->patterns);
5376 
5377 	parse_adv_monitor_rssi(m, NULL);
5378 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5379 
5380 done:
5381 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5382 					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5383 }
5384 
5385 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5386 					 void *data, u16 len)
5387 {
5388 	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5389 	struct adv_monitor *m = NULL;
5390 	u8 status = MGMT_STATUS_SUCCESS;
5391 	size_t expected_size = sizeof(*cp);
5392 
5393 	BT_DBG("request for %s", hdev->name);
5394 
5395 	if (len <= sizeof(*cp)) {
5396 		status = MGMT_STATUS_INVALID_PARAMS;
5397 		goto done;
5398 	}
5399 
5400 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5401 	if (len != expected_size) {
5402 		status = MGMT_STATUS_INVALID_PARAMS;
5403 		goto done;
5404 	}
5405 
5406 	m = kzalloc(sizeof(*m), GFP_KERNEL);
5407 	if (!m) {
5408 		status = MGMT_STATUS_NO_RESOURCES;
5409 		goto done;
5410 	}
5411 
5412 	INIT_LIST_HEAD(&m->patterns);
5413 
5414 	parse_adv_monitor_rssi(m, &cp->rssi);
5415 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5416 
5417 done:
5418 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5419 					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5420 }
5421 
/* hci_cmd_sync completion for Remove Advertisement Monitor: reply to the
 * pending mgmt command with the requested handle and release the pending
 * entry.
 */
static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
{
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;

	hci_dev_lock(hdev);

	/* Echo the handle from the request (stays little-endian, no swap) */
	rp.monitor_handle = cp->monitor_handle;

	/* Monitor set changed: re-evaluate whether passive scanning is
	 * still required.
	 */
	if (!status)
		hci_update_passive_scan(hdev);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	/* Frees cmd (and cp with it) - must not be touched afterwards */
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5444 
5445 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5446 {
5447 	struct mgmt_pending_cmd *cmd = data;
5448 	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5449 	u16 handle = __le16_to_cpu(cp->monitor_handle);
5450 
5451 	if (!handle)
5452 		return hci_remove_all_adv_monitor(hdev);
5453 
5454 	return hci_remove_single_adv_monitor(hdev, handle);
5455 }
5456 
/* Remove Advertisement Monitor command handler: queues the removal and
 * defers the reply to mgmt_remove_adv_monitor_complete().
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err, status;

	hci_dev_lock(hdev);

	/* Only one monitor (or LE state) operation may be in flight */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	/* NOTE(review): this uses hci_cmd_sync_submit() while the add path
	 * uses hci_cmd_sync_queue() - presumably intentional; confirm against
	 * hci_sync semantics before changing.
	 */
	err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
				  mgmt_remove_adv_monitor_complete);

	if (err) {
		/* Completion won't run: take the command off the pending list
		 * before replying with an error.
		 */
		mgmt_pending_remove(cmd);

		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	/* Reply is sent by the completion callback */
	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
5502 
/* hci_cmd_sync completion for Read Local OOB Data: translate the HCI
 * response skb (legacy or extended format, depending on BR/EDR Secure
 * Connections support) into the mgmt reply.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Even on err == 0 the skb may be missing, an ERR_PTR, or carry a
	 * non-zero HCI status in its first byte.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		/* Legacy response: only the P-192 hash/randomizer present */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		/* Guard against a truncated controller response */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Trim the unused P-256 fields off the reply */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended response: both P-192 and P-256 values present */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	/* Only free a real skb, never an ERR_PTR */
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
5569 
5570 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5571 {
5572 	struct mgmt_pending_cmd *cmd = data;
5573 
5574 	if (bredr_sc_enabled(hdev))
5575 		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5576 	else
5577 		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5578 
5579 	if (IS_ERR(cmd->skb))
5580 		return PTR_ERR(cmd->skb);
5581 	else
5582 		return 0;
5583 }
5584 
5585 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5586 			       void *data, u16 data_len)
5587 {
5588 	struct mgmt_pending_cmd *cmd;
5589 	int err;
5590 
5591 	bt_dev_dbg(hdev, "sock %p", sk);
5592 
5593 	hci_dev_lock(hdev);
5594 
5595 	if (!hdev_is_powered(hdev)) {
5596 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5597 				      MGMT_STATUS_NOT_POWERED);
5598 		goto unlock;
5599 	}
5600 
5601 	if (!lmp_ssp_capable(hdev)) {
5602 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5603 				      MGMT_STATUS_NOT_SUPPORTED);
5604 		goto unlock;
5605 	}
5606 
5607 	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5608 	if (!cmd)
5609 		err = -ENOMEM;
5610 	else
5611 		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5612 					 read_local_oob_data_complete);
5613 
5614 	if (err < 0) {
5615 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5616 				      MGMT_STATUS_FAILED);
5617 
5618 		if (cmd)
5619 			mgmt_pending_free(cmd);
5620 	}
5621 
5622 unlock:
5623 	hci_dev_unlock(hdev);
5624 	return err;
5625 }
5626 
/* Add Remote OOB Data command handler.  Accepts two request layouts,
 * distinguished purely by length: the legacy P-192-only form and the
 * extended form carrying both P-192 and P-256 values.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy form: P-192 hash/randomizer only, BR/EDR only */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended form: P-192 and P-256 values for BR/EDR or LE */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		/* Neither known layout: reject */
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5734 
5735 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5736 				  void *data, u16 len)
5737 {
5738 	struct mgmt_cp_remove_remote_oob_data *cp = data;
5739 	u8 status;
5740 	int err;
5741 
5742 	bt_dev_dbg(hdev, "sock %p", sk);
5743 
5744 	if (cp->addr.type != BDADDR_BREDR)
5745 		return mgmt_cmd_complete(sk, hdev->id,
5746 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5747 					 MGMT_STATUS_INVALID_PARAMS,
5748 					 &cp->addr, sizeof(cp->addr));
5749 
5750 	hci_dev_lock(hdev);
5751 
5752 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5753 		hci_remote_oob_data_clear(hdev);
5754 		status = MGMT_STATUS_SUCCESS;
5755 		goto done;
5756 	}
5757 
5758 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5759 	if (err < 0)
5760 		status = MGMT_STATUS_INVALID_PARAMS;
5761 	else
5762 		status = MGMT_STATUS_SUCCESS;
5763 
5764 done:
5765 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5766 				status, &cp->addr, sizeof(cp->addr));
5767 
5768 	hci_dev_unlock(hdev);
5769 	return err;
5770 }
5771 
5772 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5773 {
5774 	struct mgmt_pending_cmd *cmd;
5775 
5776 	bt_dev_dbg(hdev, "status %u", status);
5777 
5778 	hci_dev_lock(hdev);
5779 
5780 	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5781 	if (!cmd)
5782 		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5783 
5784 	if (!cmd)
5785 		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5786 
5787 	if (cmd) {
5788 		cmd->cmd_complete(cmd, mgmt_status(status));
5789 		mgmt_pending_remove(cmd);
5790 	}
5791 
5792 	hci_dev_unlock(hdev);
5793 }
5794 
5795 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5796 				    uint8_t *mgmt_status)
5797 {
5798 	switch (type) {
5799 	case DISCOV_TYPE_LE:
5800 		*mgmt_status = mgmt_le_support(hdev);
5801 		if (*mgmt_status)
5802 			return false;
5803 		break;
5804 	case DISCOV_TYPE_INTERLEAVED:
5805 		*mgmt_status = mgmt_le_support(hdev);
5806 		if (*mgmt_status)
5807 			return false;
5808 		fallthrough;
5809 	case DISCOV_TYPE_BREDR:
5810 		*mgmt_status = mgmt_bredr_support(hdev);
5811 		if (*mgmt_status)
5812 			return false;
5813 		break;
5814 	default:
5815 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5816 		return false;
5817 	}
5818 
5819 	return true;
5820 }
5821 
/* hci_cmd_sync completion for the Start Discovery family of commands. */
static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Bail out if the command was already removed from the pending list;
	 * cmd would be stale in that case.
	 */
	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* Reply with the first byte of the request (the discovery type) */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
				DISCOVERY_FINDING);
}
5840 
/* hci_cmd_sync callback: kick off the actual discovery procedure */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}
5845 
/* Shared implementation for Start Discovery and Start Limited Discovery:
 * validates device and discovery state, records the discovery parameters
 * and queues the discovery start.  The final reply is sent from
 * start_discovery_complete().
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject if discovery is already running or periodic inquiry is on */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	/* Limited discovery only reports devices in limited-discoverable mode */
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5916 
/* Start Discovery command handler: thin wrapper over the shared helper */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
5923 
/* Start Limited Discovery command handler: thin wrapper over the shared
 * helper, which sets hdev->discovery.limited for this opcode.
 */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
5931 
/* Start Service Discovery command handler: like Start Discovery, but with
 * an RSSI threshold and a variable-length list of 128-bit service UUIDs to
 * filter results against.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound that keeps sizeof(*cp) + uuid_count * 16 within u16 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject if discovery is already running or periodic inquiry is on */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* Cannot overflow: uuid_count was bounded above */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		/* Copy the UUID filter list; freed via
		 * hci_discovery_filter_clear()
		 */
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
6043 
6044 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6045 {
6046 	struct mgmt_pending_cmd *cmd;
6047 
6048 	bt_dev_dbg(hdev, "status %u", status);
6049 
6050 	hci_dev_lock(hdev);
6051 
6052 	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6053 	if (cmd) {
6054 		cmd->cmd_complete(cmd, mgmt_status(status));
6055 		mgmt_pending_remove(cmd);
6056 	}
6057 
6058 	hci_dev_unlock(hdev);
6059 }
6060 
/* hci_cmd_sync completion for Stop Discovery. */
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Bail out if the command was already removed from the pending list;
	 * cmd would be stale in that case.
	 */
	if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* Reply with the first byte of the request (the discovery type) */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
6077 
/* hci_cmd_sync callback: stop the running discovery procedure */
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_stop_discovery_sync(hdev);
}
6082 
/* Stop Discovery command handler: validates that discovery is active and of
 * the requested type, then queues the stop.  The final reply is sent from
 * stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* Requested type must match the type discovery was started with */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6127 
6128 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6129 			u16 len)
6130 {
6131 	struct mgmt_cp_confirm_name *cp = data;
6132 	struct inquiry_entry *e;
6133 	int err;
6134 
6135 	bt_dev_dbg(hdev, "sock %p", sk);
6136 
6137 	hci_dev_lock(hdev);
6138 
6139 	if (!hci_discovery_active(hdev)) {
6140 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6141 					MGMT_STATUS_FAILED, &cp->addr,
6142 					sizeof(cp->addr));
6143 		goto failed;
6144 	}
6145 
6146 	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6147 	if (!e) {
6148 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6149 					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6150 					sizeof(cp->addr));
6151 		goto failed;
6152 	}
6153 
6154 	if (cp->name_known) {
6155 		e->name_state = NAME_KNOWN;
6156 		list_del(&e->list);
6157 	} else {
6158 		e->name_state = NAME_NEEDED;
6159 		hci_inquiry_cache_update_resolve(hdev, e);
6160 	}
6161 
6162 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6163 				&cp->addr, sizeof(cp->addr));
6164 
6165 failed:
6166 	hci_dev_unlock(hdev);
6167 	return err;
6168 }
6169 
6170 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6171 			u16 len)
6172 {
6173 	struct mgmt_cp_block_device *cp = data;
6174 	u8 status;
6175 	int err;
6176 
6177 	bt_dev_dbg(hdev, "sock %p", sk);
6178 
6179 	if (!bdaddr_type_is_valid(cp->addr.type))
6180 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6181 					 MGMT_STATUS_INVALID_PARAMS,
6182 					 &cp->addr, sizeof(cp->addr));
6183 
6184 	hci_dev_lock(hdev);
6185 
6186 	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6187 				  cp->addr.type);
6188 	if (err < 0) {
6189 		status = MGMT_STATUS_FAILED;
6190 		goto done;
6191 	}
6192 
6193 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6194 		   sk);
6195 	status = MGMT_STATUS_SUCCESS;
6196 
6197 done:
6198 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6199 				&cp->addr, sizeof(cp->addr));
6200 
6201 	hci_dev_unlock(hdev);
6202 
6203 	return err;
6204 }
6205 
6206 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6207 			  u16 len)
6208 {
6209 	struct mgmt_cp_unblock_device *cp = data;
6210 	u8 status;
6211 	int err;
6212 
6213 	bt_dev_dbg(hdev, "sock %p", sk);
6214 
6215 	if (!bdaddr_type_is_valid(cp->addr.type))
6216 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6217 					 MGMT_STATUS_INVALID_PARAMS,
6218 					 &cp->addr, sizeof(cp->addr));
6219 
6220 	hci_dev_lock(hdev);
6221 
6222 	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6223 				  cp->addr.type);
6224 	if (err < 0) {
6225 		status = MGMT_STATUS_INVALID_PARAMS;
6226 		goto done;
6227 	}
6228 
6229 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6230 		   sk);
6231 	status = MGMT_STATUS_SUCCESS;
6232 
6233 done:
6234 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6235 				&cp->addr, sizeof(cp->addr));
6236 
6237 	hci_dev_unlock(hdev);
6238 
6239 	return err;
6240 }
6241 
/* hci_cmd_sync callback: regenerate the EIR data so the new Device ID
 * record is advertised.
 */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}
6246 
6247 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6248 			 u16 len)
6249 {
6250 	struct mgmt_cp_set_device_id *cp = data;
6251 	int err;
6252 	__u16 source;
6253 
6254 	bt_dev_dbg(hdev, "sock %p", sk);
6255 
6256 	source = __le16_to_cpu(cp->source);
6257 
6258 	if (source > 0x0002)
6259 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6260 				       MGMT_STATUS_INVALID_PARAMS);
6261 
6262 	hci_dev_lock(hdev);
6263 
6264 	hdev->devid_source = source;
6265 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6266 	hdev->devid_product = __le16_to_cpu(cp->product);
6267 	hdev->devid_version = __le16_to_cpu(cp->version);
6268 
6269 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6270 				NULL, 0);
6271 
6272 	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6273 
6274 	hci_dev_unlock(hdev);
6275 
6276 	return err;
6277 }
6278 
/* Log the outcome of re-enabling an advertising instance. */
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (err) {
		bt_dev_err(hdev, "failed to re-configure advertising %d", err);
		return;
	}

	bt_dev_dbg(hdev, "status %d", err);
}
6286 
/* hci_cmd_sync completion for Set Advertising: sync the HCI_ADVERTISING
 * flag with the controller state, answer all pending Set Advertising
 * commands, and restart instance advertising if it was displaced.
 */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		/* Fail every pending Set Advertising command */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	/* Mirror the actual controller advertising state into the flag */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	/* Reply to all pending commands; match.sk collects one socket ref */
	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		/* No current instance: fall back to the first configured one */
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}
6334 
/* hci_cmd_sync callback for Set Advertising: apply the requested mode.
 * cp->val: 0x00 = off, 0x01 = on, 0x02 = on and connectable.
 */
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	/* Track the connectable sub-mode separately from on/off */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	/* Any instance-advertising timeout no longer applies */
	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			/* Legacy advertising: data must be set before enable */
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}
6368 
/* Handler for MGMT_OP_SET_ADVERTISING.
 *
 * cp->val: 0x00 = disable, 0x01 = enable, 0x02 = enable connectable
 * advertising.  When no HCI traffic is required (or allowed) the flags
 * are toggled directly and a settings response is sent; otherwise the
 * actual controller programming is deferred to set_adv_sync().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Advertising is temporarily suspended (e.g. by another
	 * operation); reject rather than interfere with it.
	 */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_dev_test_flag(hdev, HCI_MESH) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast new settings if the flags actually changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one Set Advertising / Set LE operation at a time */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6453 
6454 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6455 			      void *data, u16 len)
6456 {
6457 	struct mgmt_cp_set_static_address *cp = data;
6458 	int err;
6459 
6460 	bt_dev_dbg(hdev, "sock %p", sk);
6461 
6462 	if (!lmp_le_capable(hdev))
6463 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6464 				       MGMT_STATUS_NOT_SUPPORTED);
6465 
6466 	if (hdev_is_powered(hdev))
6467 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6468 				       MGMT_STATUS_REJECTED);
6469 
6470 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6471 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6472 			return mgmt_cmd_status(sk, hdev->id,
6473 					       MGMT_OP_SET_STATIC_ADDRESS,
6474 					       MGMT_STATUS_INVALID_PARAMS);
6475 
6476 		/* Two most significant bits shall be set */
6477 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6478 			return mgmt_cmd_status(sk, hdev->id,
6479 					       MGMT_OP_SET_STATIC_ADDRESS,
6480 					       MGMT_STATUS_INVALID_PARAMS);
6481 	}
6482 
6483 	hci_dev_lock(hdev);
6484 
6485 	bacpy(&hdev->static_addr, &cp->bdaddr);
6486 
6487 	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6488 	if (err < 0)
6489 		goto unlock;
6490 
6491 	err = new_settings(hdev, sk);
6492 
6493 unlock:
6494 	hci_dev_unlock(hdev);
6495 	return err;
6496 }
6497 
6498 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6499 			   void *data, u16 len)
6500 {
6501 	struct mgmt_cp_set_scan_params *cp = data;
6502 	__u16 interval, window;
6503 	int err;
6504 
6505 	bt_dev_dbg(hdev, "sock %p", sk);
6506 
6507 	if (!lmp_le_capable(hdev))
6508 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6509 				       MGMT_STATUS_NOT_SUPPORTED);
6510 
6511 	interval = __le16_to_cpu(cp->interval);
6512 
6513 	if (interval < 0x0004 || interval > 0x4000)
6514 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6515 				       MGMT_STATUS_INVALID_PARAMS);
6516 
6517 	window = __le16_to_cpu(cp->window);
6518 
6519 	if (window < 0x0004 || window > 0x4000)
6520 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6521 				       MGMT_STATUS_INVALID_PARAMS);
6522 
6523 	if (window > interval)
6524 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6525 				       MGMT_STATUS_INVALID_PARAMS);
6526 
6527 	hci_dev_lock(hdev);
6528 
6529 	hdev->le_scan_interval = interval;
6530 	hdev->le_scan_window = window;
6531 
6532 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6533 				NULL, 0);
6534 
6535 	/* If background scan is running, restart it so new parameters are
6536 	 * loaded.
6537 	 */
6538 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6539 	    hdev->discovery.state == DISCOVERY_STOPPED)
6540 		hci_update_passive_scan(hdev);
6541 
6542 	hci_dev_unlock(hdev);
6543 
6544 	return err;
6545 }
6546 
/* Completion callback for MGMT_OP_SET_FAST_CONNECTABLE.
 *
 * On success the HCI_FAST_CONNECTABLE flag is synced with the value
 * the caller requested and the new settings are broadcast; on failure
 * only a command status is returned and the flag is left untouched.
 */
static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(err));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}
6570 
6571 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6572 {
6573 	struct mgmt_pending_cmd *cmd = data;
6574 	struct mgmt_mode *cp = cmd->param;
6575 
6576 	return hci_write_fast_connectable_sync(hdev, cp->val);
6577 }
6578 
/* Handler for MGMT_OP_SET_FAST_CONNECTABLE.
 *
 * Requires BR/EDR to be enabled and a controller of Bluetooth 1.2 or
 * later.  When powered off only the flag is toggled; when powered on
 * the page-scan change is queued and the flag is updated in
 * fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Nothing to do if the setting already matches */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* While powered off just toggle the flag; the controller is
	 * configured on power-on.
	 */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6634 
/* Completion callback for MGMT_OP_SET_BREDR.
 *
 * set_bredr() flips HCI_BREDR_ENABLED optimistically before queueing
 * the work, so the flag has to be cleared again here if the HCI
 * commands failed.
 */
static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}
6657 
6658 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6659 {
6660 	int status;
6661 
6662 	status = hci_write_fast_connectable_sync(hdev, false);
6663 
6664 	if (!status)
6665 		status = hci_update_scan_sync(hdev);
6666 
6667 	/* Since only the advertising data flags will change, there
6668 	 * is no need to update the scan response data.
6669 	 */
6670 	if (!status)
6671 		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6672 
6673 	return status;
6674 }
6675 
/* Handler for MGMT_OP_SET_BREDR.
 *
 * Enables or disables BR/EDR support on a dual-mode controller.  LE
 * must already be enabled.  Disabling while powered on is rejected, as
 * is re-enabling when a static address or Secure Connections would
 * make the resulting configuration invalid.  The actual controller
 * update is deferred to set_bredr_sync().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Nothing to do if the setting already matches */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	/* While powered off only the flags are updated; clearing the
	 * BR/EDR-only settings when BR/EDR is being disabled.
	 */
	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6775 
/* Completion callback for MGMT_OP_SET_SECURE_CONN.
 *
 * On success the SC flags are synced with the requested mode:
 * 0x00 = off, 0x01 = SC enabled, 0x02 = SC-only mode.
 */
static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
}
6813 
6814 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6815 {
6816 	struct mgmt_pending_cmd *cmd = data;
6817 	struct mgmt_mode *cp = cmd->param;
6818 	u8 val = !!cp->val;
6819 
6820 	/* Force write of val */
6821 	hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6822 
6823 	return hci_write_sc_support_sync(hdev, val);
6824 }
6825 
/* Handler for MGMT_OP_SET_SECURE_CONN.
 *
 * cp->val: 0x00 = disable, 0x01 = enable, 0x02 = SC-only mode.  When
 * the controller cannot be written to (powered off, no SC support, or
 * BR/EDR disabled) only the host flags are toggled; otherwise the
 * write is deferred to set_secure_conn_sync().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* SC on BR/EDR requires SSP to be enabled first */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No HCI traffic possible or needed: just toggle the flags */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* Nothing to do if both flags already match the request */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6906 
/* Handler for MGMT_OP_SET_DEBUG_KEYS.
 *
 * cp->val: 0x00 = discard debug keys, 0x01 = keep debug keys,
 * 0x02 = keep debug keys and also use the SSP debug mode.  When the
 * use-flag changes on a powered controller with SSP enabled, the SSP
 * debug mode is written out via HCI.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Only mode 0x02 turns on actual use of debug keys */
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6953 
/* Handler for MGMT_OP_SET_PRIVACY.
 *
 * cp->privacy: 0x00 = disable, 0x01 = enable, 0x02 = limited privacy.
 * Stores the local IRK and toggles the privacy-related flags.  Only
 * allowed while the controller is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a fresh RPA with the new IRK */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7010 
7011 static bool irk_is_valid(struct mgmt_irk_info *irk)
7012 {
7013 	switch (irk->addr.type) {
7014 	case BDADDR_LE_PUBLIC:
7015 		return true;
7016 
7017 	case BDADDR_LE_RANDOM:
7018 		/* Two most significant bits shall be set */
7019 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7020 			return false;
7021 		return true;
7022 	}
7023 
7024 	return false;
7025 }
7026 
/* Handler for MGMT_OP_LOAD_IRKS.
 *
 * Replaces the complete set of stored Identity Resolving Keys with the
 * list provided by user space.  The whole list is validated first so
 * the existing keys are only cleared when every entry is acceptable;
 * blocked keys are skipped with a warning.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps struct_size() below U16_MAX */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the advertised count exactly */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate all entries before touching the stored keys */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type = le_addr_type(irk->addr.type);

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
		if (irk->addr.type == BDADDR_BREDR)
			addr_type = BDADDR_BREDR;

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    addr_type, irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7102 
7103 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7104 {
7105 	if (key->initiator != 0x00 && key->initiator != 0x01)
7106 		return false;
7107 
7108 	switch (key->addr.type) {
7109 	case BDADDR_LE_PUBLIC:
7110 		return true;
7111 
7112 	case BDADDR_LE_RANDOM:
7113 		/* Two most significant bits shall be set */
7114 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7115 			return false;
7116 		return true;
7117 	}
7118 
7119 	return false;
7120 }
7121 
/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS.
 *
 * Replaces the complete set of stored SMP Long Term Keys with the
 * list provided by user space.  All entries are validated before the
 * existing keys are cleared; blocked keys and keys of unknown type
 * are skipped.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound that keeps struct_size() below U16_MAX */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the advertised count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Validate all entries before touching the stored keys */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;
		u8 addr_type = le_addr_type(key->addr.type);

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* P256 debug keys fall through to the default
			 * case and are never stored.
			 */
			fallthrough;
		default:
			continue;
		}

		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
		if (key->addr.type == BDADDR_BREDR)
			addr_type = BDADDR_BREDR;

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    addr_type, type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7222 
/* Completion callback for MGMT_OP_GET_CONN_INFO.
 *
 * On success the values refreshed by get_conn_info_sync() are read
 * from the connection; on failure the invalid sentinel values are
 * returned instead.
 */
static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct mgmt_rp_get_conn_info rp;
	u8 status;

	bt_dev_dbg(hdev, "err %d", err);

	/* Echo the requested address back in the reply */
	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));

	status = mgmt_status(err);
	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
			  &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}
7251 
/* hci_cmd_sync callback for MGMT_OP_GET_CONN_INFO: refresh the cached
 * RSSI and TX power values for the requested connection.
 *
 * Returns 0 on success, a negative errno from the HCI reads, or
 * MGMT_STATUS_NOT_CONNECTED (positive) when the connection is gone;
 * the value is mapped to a mgmt status in get_conn_info_complete().
 */
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16   handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* Stash the connection for the completion callback */
	cmd->user_data = conn;
	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}
7289 
/* Handler for MGMT_OP_GET_CONN_INFO.
 *
 * Returns RSSI and TX power for an existing connection.  Values are
 * served from the hci_conn cache when recent enough; otherwise a
 * refresh is queued via get_conn_info_sync() and the reply is sent
 * from get_conn_info_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
						 hdev->conn_info_max_age - 1);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd) {
			err = -ENOMEM;
		} else {
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);
		}

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7380 
/* Completion callback for MGMT_OP_GET_CLOCK_INFO.
 *
 * On success the local clock and, if a connection was involved, the
 * piconet clock and its accuracy are returned; on failure the reply
 * carries only the echoed address with zeroed clock fields.
 */
static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct mgmt_rp_get_clock_info rp;
	struct hci_conn *conn = cmd->user_data;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (err)
		goto complete;

	rp.local_clock = cpu_to_le32(hdev->clock);

	/* conn is only set when a piconet clock was requested */
	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
			  sizeof(rp));

	mgmt_pending_free(cmd);
}
7411 
/* hci_cmd_sync callback for MGMT_OP_GET_CLOCK_INFO.
 *
 * First issues HCI Read Clock with a zeroed parameter block (which =
 * 0x00, i.e. the local clock), then — if the requested connection
 * still exists — reads the piconet clock for that connection.
 */
static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn;

	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_read_clock_sync(hdev, &hci_cp);

	/* Make sure connection still exists */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* Stash the connection for the completion callback */
	cmd->user_data = conn;
	hci_cp.handle = cpu_to_le16(conn->handle);
	hci_cp.which = 0x01; /* Piconet clock */

	return hci_read_clock_sync(hdev, &hci_cp);
}
7433 
/* Get Clock Information (MGMT_OP_GET_CLOCK_INFO) handler.
 *
 * Only BR/EDR addresses are accepted.  If an address is given it must
 * refer to an established ACL link; BDADDR_ANY requests only the local
 * clock.  The actual HCI traffic happens asynchronously through
 * get_clock_info_sync()/get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
								u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pre-fill the reply address for all error replies below */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is a BR/EDR-only concept */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-wildcard address must name an existing ACL connection */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	/* mgmt_pending_new (not _add) since completion is driven solely by
	 * the cmd_sync callback.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);
	}


unlock:
	hci_dev_unlock(hdev);
	return err;
}
7497 
7498 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7499 {
7500 	struct hci_conn *conn;
7501 
7502 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7503 	if (!conn)
7504 		return false;
7505 
7506 	if (conn->dst_type != type)
7507 		return false;
7508 
7509 	if (conn->state != BT_CONNECTED)
7510 		return false;
7511 
7512 	return true;
7513 }
7514 
/* This function requires the caller holds hdev->lock.
 *
 * Set (creating if necessary) the auto-connect policy for the given
 * LE connection parameters and move them onto the matching pending
 * list (pend_le_conns or pend_le_reports).  Returns 0 on success or
 * -EIO if the parameters could not be allocated.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do if the policy is unchanged */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Remove from the current pending list before re-filing below */
	hci_pend_le_list_del_init(params);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* An in-progress explicit connect takes precedence over
		 * mere reporting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		else
			hci_pend_le_list_add(params, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connection attempt if not already connected */
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
7559 
7560 static void device_added(struct sock *sk, struct hci_dev *hdev,
7561 			 bdaddr_t *bdaddr, u8 type, u8 action)
7562 {
7563 	struct mgmt_ev_device_added ev;
7564 
7565 	bacpy(&ev.addr.bdaddr, bdaddr);
7566 	ev.addr.type = type;
7567 	ev.action = action;
7568 
7569 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7570 }
7571 
/* cmd_sync worker queued by add_device(): re-program passive scanning
 * after the connection parameter lists have changed.
 */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7576 
/* Add Device (MGMT_OP_ADD_DEVICE) handler.
 *
 * For a BR/EDR address the device is placed on the controller accept
 * list (only action 0x01, allow incoming connection, is supported).
 * For LE addresses the action selects the auto-connect policy
 * (0x00 report/background scan, 0x01 direct, 0x02 always) and passive
 * scanning is re-programmed asynchronously.  Replies with the address
 * on both success and failure.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Reject invalid address types and the wildcard address */
	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		hci_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action onto the internal auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->flags;
	}

	/* Re-program passive scanning to pick up the new entry */
	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	supported_flags = hdev->conn_flags;
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7678 
7679 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7680 			   bdaddr_t *bdaddr, u8 type)
7681 {
7682 	struct mgmt_ev_device_removed ev;
7683 
7684 	bacpy(&ev.addr.bdaddr, bdaddr);
7685 	ev.addr.type = type;
7686 
7687 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7688 }
7689 
/* cmd_sync worker queued by remove_device(): re-program passive
 * scanning after the connection parameter lists have changed.
 */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7694 
/* Remove Device (MGMT_OP_REMOVE_DEVICE) handler.
 *
 * A specific address removes either a BR/EDR accept-list entry or the
 * LE connection parameters for that identity address.  BDADDR_ANY with
 * address type 0 clears the whole accept list and all removable LE
 * connection parameters.  Passive scanning is re-programmed
 * asynchronously afterwards.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			/* BR/EDR removal needs no passive scan update */
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries not created via Add Device cannot be removed here */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		hci_conn_params_free(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* Wildcard removal requires address type 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep params for in-flight explicit connects */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			hci_conn_params_free(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	/* Best-effort: failure to queue only delays the scan update */
	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
7818 
/* cmd_sync worker queued by load_conn_param(): issue an LE connection
 * update for the connection matching the given parameters, or bail out
 * if that connection is gone by the time this runs.
 */
static int conn_update_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn_params *params = data;
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_le(hdev, &params->addr, params->addr_type);
	if (!conn)
		return -ECANCELED;

	return hci_le_conn_update_sync(hdev, conn, params);
}
7830 
/* Load Connection Parameters (MGMT_OP_LOAD_CONN_PARAM) handler.
 *
 * Replaces/merges the stored LE connection parameters with the list
 * supplied by userspace.  Invalid entries are skipped (not rejected).
 * When a single, already-known entry is loaded and the live connection
 * (as central) uses different values, a connection update procedure is
 * triggered asynchronously.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound that keeps struct_size() below within u16 range */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the declared count */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	/* A bulk load replaces the disabled entries up front; the
	 * single-entry case is handled inside the loop below.
	 */
	if (param_count > 1)
		hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		bool update = false;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		/* Detect when the loading is for an existing parameter then
		 * attempt to trigger the connection update procedure.
		 */
		if (!i && param_count == 1) {
			hci_param = hci_conn_params_lookup(hdev,
							   &param->addr.bdaddr,
							   addr_type);
			if (hci_param)
				update = true;
			else
				hci_conn_params_clear_disabled(hdev);
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;

		/* Check if we need to trigger a connection update */
		if (update) {
			struct hci_conn *conn;

			/* Lookup for existing connection as central and check
			 * if parameters match and if they don't then trigger
			 * a connection update.
			 */
			conn = hci_conn_hash_lookup_le(hdev, &hci_param->addr,
						       addr_type);
			if (conn && conn->role == HCI_ROLE_MASTER &&
			    (conn->le_conn_min_interval != min ||
			     conn->le_conn_max_interval != max ||
			     conn->le_conn_latency != latency ||
			     conn->le_supv_timeout != timeout))
				hci_cmd_sync_queue(hdev, conn_update_sync,
						   hci_param, NULL);
		}
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
7949 
7950 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7951 			       void *data, u16 len)
7952 {
7953 	struct mgmt_cp_set_external_config *cp = data;
7954 	bool changed;
7955 	int err;
7956 
7957 	bt_dev_dbg(hdev, "sock %p", sk);
7958 
7959 	if (hdev_is_powered(hdev))
7960 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7961 				       MGMT_STATUS_REJECTED);
7962 
7963 	if (cp->config != 0x00 && cp->config != 0x01)
7964 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7965 				         MGMT_STATUS_INVALID_PARAMS);
7966 
7967 	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7968 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7969 				       MGMT_STATUS_NOT_SUPPORTED);
7970 
7971 	hci_dev_lock(hdev);
7972 
7973 	if (cp->config)
7974 		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7975 	else
7976 		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7977 
7978 	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7979 	if (err < 0)
7980 		goto unlock;
7981 
7982 	if (!changed)
7983 		goto unlock;
7984 
7985 	err = new_options(hdev, sk);
7986 
7987 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7988 		mgmt_index_removed(hdev);
7989 
7990 		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7991 			hci_dev_set_flag(hdev, HCI_CONFIG);
7992 			hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7993 
7994 			queue_work(hdev->req_workqueue, &hdev->power_on);
7995 		} else {
7996 			set_bit(HCI_RAW, &hdev->flags);
7997 			mgmt_index_added(hdev);
7998 		}
7999 	}
8000 
8001 unlock:
8002 	hci_dev_unlock(hdev);
8003 	return err;
8004 }
8005 
/* Set Public Address (MGMT_OP_SET_PUBLIC_ADDRESS) handler.
 *
 * Stores a public address to be programmed into controllers that
 * provide a set_bdaddr driver callback.  Only allowed while powered
 * off; if the address changes and the controller becomes configured,
 * it is moved to the configured index list and powered up for setup.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* The wildcard address cannot be used as a public address */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
8057 
/* Completion callback for the HCI Read Local OOB (Extended) Data request
 * queued by read_local_ssp_oob_req().  Parses the controller reply into
 * EIR-formatted OOB data, completes the pending mgmt command and emits
 * the Local OOB Data Updated event to subscribed sockets.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	/* Bail out if the pending command was already taken over */
	if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
		return;

	/* Derive the status from the HCI reply when the request itself
	 * did not fail.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		/* Failure: reply without any OOB data */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		/* Legacy SSP: only the P-192 hash/randomizer pair */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Secure Connections: P-256 pair, plus P-192 unless the
		 * controller is in SC-only mode.
		 */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* No EIR payload to build on failure */
	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	/* Notify other subscribed sockets of the fresh OOB data */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
8180 
8181 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8182 				  struct mgmt_cp_read_local_oob_ext_data *cp)
8183 {
8184 	struct mgmt_pending_cmd *cmd;
8185 	int err;
8186 
8187 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8188 			       cp, sizeof(*cp));
8189 	if (!cmd)
8190 		return -ENOMEM;
8191 
8192 	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8193 				 read_local_oob_ext_data_complete);
8194 
8195 	if (err < 0) {
8196 		mgmt_pending_remove(cmd);
8197 		return err;
8198 	}
8199 
8200 	return 0;
8201 }
8202 
/* Read Local OOB Extended Data (MGMT_OP_READ_LOCAL_OOB_EXT_DATA)
 * handler.
 *
 * For BR/EDR with SSP enabled the data comes from the controller via an
 * asynchronous HCI request (read_local_ssp_oob_req()); in all other
 * supported cases the EIR payload is assembled inline and the command
 * is completed synchronously.  On success the Local OOB Data Updated
 * event is emitted to other subscribed sockets.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* First pass: compute the worst-case EIR length for the reply
	 * allocation and validate the requested address type bitmask.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: build the actual EIR payload */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* Controller-provided OOB data; reply is sent
			 * asynchronously on success.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick the address the same way advertising would: the
		 * static address when forced, when no public address is
		 * set, or when BR/EDR is disabled and a static address
		 * exists; byte 7 encodes the address type (0x01 random).
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* LE role: 0x02 peripheral-preferred when advertising,
		 * 0x01 central-preferred otherwise.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		/* hash/rand are only initialized when SC is enabled (see
		 * smp_generate_oob() call above).
		 */
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
8363 
8364 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8365 {
8366 	u32 flags = 0;
8367 
8368 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
8369 	flags |= MGMT_ADV_FLAG_DISCOV;
8370 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8371 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8372 	flags |= MGMT_ADV_FLAG_APPEARANCE;
8373 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8374 	flags |= MGMT_ADV_PARAM_DURATION;
8375 	flags |= MGMT_ADV_PARAM_TIMEOUT;
8376 	flags |= MGMT_ADV_PARAM_INTERVALS;
8377 	flags |= MGMT_ADV_PARAM_TX_POWER;
8378 	flags |= MGMT_ADV_PARAM_SCAN_RSP;
8379 
8380 	/* In extended adv TX_POWER returned from Set Adv Param
8381 	 * will be always valid.
8382 	 */
8383 	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8384 		flags |= MGMT_ADV_FLAG_TX_POWER;
8385 
8386 	if (ext_adv_capable(hdev)) {
8387 		flags |= MGMT_ADV_FLAG_SEC_1M;
8388 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8389 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8390 
8391 		if (le_2m_capable(hdev))
8392 			flags |= MGMT_ADV_FLAG_SEC_2M;
8393 
8394 		if (le_coded_capable(hdev))
8395 			flags |= MGMT_ADV_FLAG_SEC_CODED;
8396 	}
8397 
8398 	return flags;
8399 }
8400 
/* Read Advertising Features (MGMT_OP_READ_ADV_FEATURES) handler.
 *
 * Reports the supported advertising flags, data-length limits and the
 * list of currently registered advertising instance identifiers.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One byte per registered instance for the trailing id list */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = max_adv_len(hdev);
	rp->max_scan_rsp_len = max_adv_len(hdev);
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		/* Only instances 1-le_num_of_adv_sets are externally
		 * visible.  NOTE(review): the comparison below uses
		 * adv_instance_cnt rather than le_num_of_adv_sets —
		 * confirm which bound is intended.
		 */
		if (adv_instance->instance <= hdev->adv_instance_cnt) {
			*instance = adv_instance->instance;
			instance++;
		} else {
			rp->num_instances--;
			rp_len--;
		}
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
8455 
/* Return the number of scan response bytes the kernel-appended local
 * name field would occupy (computed by formatting it into a scratch
 * buffer).
 */
static u8 calculate_name_len(struct hci_dev *hdev)
{
	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */

	return eir_append_local_name(hdev, buf, 0);
}
8462 
8463 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8464 			   bool is_adv_data)
8465 {
8466 	u8 max_len = max_adv_len(hdev);
8467 
8468 	if (is_adv_data) {
8469 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8470 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
8471 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
8472 			max_len -= 3;
8473 
8474 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8475 			max_len -= 3;
8476 	} else {
8477 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8478 			max_len -= calculate_name_len(hdev);
8479 
8480 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8481 			max_len -= 4;
8482 	}
8483 
8484 	return max_len;
8485 }
8486 
8487 static bool flags_managed(u32 adv_flags)
8488 {
8489 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8490 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
8491 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
8492 }
8493 
8494 static bool tx_power_managed(u32 adv_flags)
8495 {
8496 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8497 }
8498 
8499 static bool name_managed(u32 adv_flags)
8500 {
8501 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8502 }
8503 
8504 static bool appearance_managed(u32 adv_flags)
8505 {
8506 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8507 }
8508 
8509 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8510 			      u8 len, bool is_adv_data)
8511 {
8512 	int i, cur_len;
8513 	u8 max_len;
8514 
8515 	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8516 
8517 	if (len > max_len)
8518 		return false;
8519 
8520 	/* Make sure that the data is correctly formatted. */
8521 	for (i = 0; i < len; i += (cur_len + 1)) {
8522 		cur_len = data[i];
8523 
8524 		if (!cur_len)
8525 			continue;
8526 
8527 		if (data[i + 1] == EIR_FLAGS &&
8528 		    (!is_adv_data || flags_managed(adv_flags)))
8529 			return false;
8530 
8531 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8532 			return false;
8533 
8534 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8535 			return false;
8536 
8537 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8538 			return false;
8539 
8540 		if (data[i + 1] == EIR_APPEARANCE &&
8541 		    appearance_managed(adv_flags))
8542 			return false;
8543 
8544 		/* If the current field length would exceed the total data
8545 		 * length, then it's invalid.
8546 		 */
8547 		if (i + cur_len >= len)
8548 			return false;
8549 	}
8550 
8551 	return true;
8552 }
8553 
8554 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8555 {
8556 	u32 supported_flags, phy_flags;
8557 
8558 	/* The current implementation only supports a subset of the specified
8559 	 * flags. Also need to check mutual exclusiveness of sec flags.
8560 	 */
8561 	supported_flags = get_supported_adv_flags(hdev);
8562 	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8563 	if (adv_flags & ~supported_flags ||
8564 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8565 		return false;
8566 
8567 	return true;
8568 }
8569 
8570 static bool adv_busy(struct hci_dev *hdev)
8571 {
8572 	return pending_find(MGMT_OP_SET_LE, hdev);
8573 }
8574 
8575 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8576 			     int err)
8577 {
8578 	struct adv_info *adv, *n;
8579 
8580 	bt_dev_dbg(hdev, "err %d", err);
8581 
8582 	hci_dev_lock(hdev);
8583 
8584 	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8585 		u8 instance;
8586 
8587 		if (!adv->pending)
8588 			continue;
8589 
8590 		if (!err) {
8591 			adv->pending = false;
8592 			continue;
8593 		}
8594 
8595 		instance = adv->instance;
8596 
8597 		if (hdev->cur_adv_instance == instance)
8598 			cancel_adv_timeout(hdev);
8599 
8600 		hci_remove_adv_instance(hdev, instance);
8601 		mgmt_advertising_removed(sk, hdev, instance);
8602 	}
8603 
8604 	hci_dev_unlock(hdev);
8605 }
8606 
8607 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8608 {
8609 	struct mgmt_pending_cmd *cmd = data;
8610 	struct mgmt_cp_add_advertising *cp = cmd->param;
8611 	struct mgmt_rp_add_advertising rp;
8612 
8613 	memset(&rp, 0, sizeof(rp));
8614 
8615 	rp.instance = cp->instance;
8616 
8617 	if (err)
8618 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8619 				mgmt_status(err));
8620 	else
8621 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8622 				  mgmt_status(err), &rp, sizeof(rp));
8623 
8624 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8625 
8626 	mgmt_pending_free(cmd);
8627 }
8628 
8629 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8630 {
8631 	struct mgmt_pending_cmd *cmd = data;
8632 	struct mgmt_cp_add_advertising *cp = cmd->param;
8633 
8634 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8635 }
8636 
/* Handler for MGMT_OP_ADD_ADVERTISING: register (or overwrite) advertising
 * instance cp->instance with the supplied flags, advertising data and scan
 * response data, then schedule it for transmission where possible.  Replies
 * with the instance number on success.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0; /* 0 means nothing to schedule */
	struct adv_info *adv, *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Instance numbers are 1-based and bounded by the controller */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The variable-length payload must match the declared lengths */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout is only accepted while the controller is powered */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Validate both TLV payloads (adv data, then scan response) */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	adv = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval, 0);
	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Rewrite the instance in the stored parameters so that the sync
	 * callback schedules the instance chosen above.
	 */
	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8771 
/* Completion callback for MGMT_OP_ADD_EXT_ADV_PARAMS.
 *
 * On success, reply with the selected TX power and the remaining space
 * for advertising and scan response data given the requested flags.  On
 * failure, tear the instance down again (notifying userspace if it had
 * already been announced) and return an error status.  The pending
 * command is freed in all paths.
 */
static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Instance may already be gone; nothing to report in that case */
	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	/* Every field of rp is assigned below, so no memset is needed */
	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}
8821 
8822 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8823 {
8824 	struct mgmt_pending_cmd *cmd = data;
8825 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8826 
8827 	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8828 }
8829 
/* Handler for MGMT_OP_ADD_EXT_ADV_PARAMS: create an advertising instance
 * with parameters only; the advertising and scan response data are supplied
 * separately via MGMT_OP_ADD_EXT_ADV_DATA.
 */
static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	/* Instance numbers are 1-based and bounded by the controller */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined. Any
	 * extra parameters we don't know about will be ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
				   timeout, duration, tx_power, min_interval,
				   max_interval, 0);

	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			/* Roll back the instance created above */
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		/* No HCI work is needed yet without extended advertising;
		 * reply immediately with default TX power and the available
		 * space for the given flags.
		 */
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8945 
8946 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8947 {
8948 	struct mgmt_pending_cmd *cmd = data;
8949 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8950 	struct mgmt_rp_add_advertising rp;
8951 
8952 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8953 
8954 	memset(&rp, 0, sizeof(rp));
8955 
8956 	rp.instance = cp->instance;
8957 
8958 	if (err)
8959 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8960 				mgmt_status(err));
8961 	else
8962 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8963 				  mgmt_status(err), &rp, sizeof(rp));
8964 
8965 	mgmt_pending_free(cmd);
8966 }
8967 
8968 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8969 {
8970 	struct mgmt_pending_cmd *cmd = data;
8971 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8972 	int err;
8973 
8974 	if (ext_adv_capable(hdev)) {
8975 		err = hci_update_adv_data_sync(hdev, cp->instance);
8976 		if (err)
8977 			return err;
8978 
8979 		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8980 		if (err)
8981 			return err;
8982 
8983 		return hci_enable_ext_advertising_sync(hdev, cp->instance);
8984 	}
8985 
8986 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8987 }
8988 
/* Handler for MGMT_OP_ADD_EXT_ADV_DATA: attach advertising and scan
 * response data to an instance previously created with
 * MGMT_OP_ADD_EXT_ADV_PARAMS, then schedule it.  On any failure after the
 * instance lookup the (possibly just-created) instance is removed again
 * via the clear_new_instance path.
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0; /* 0 means nothing to schedule */
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		/* Announce the instance now that its data is complete */
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9107 
9108 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9109 					int err)
9110 {
9111 	struct mgmt_pending_cmd *cmd = data;
9112 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9113 	struct mgmt_rp_remove_advertising rp;
9114 
9115 	bt_dev_dbg(hdev, "err %d", err);
9116 
9117 	memset(&rp, 0, sizeof(rp));
9118 	rp.instance = cp->instance;
9119 
9120 	if (err)
9121 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9122 				mgmt_status(err));
9123 	else
9124 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9125 				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9126 
9127 	mgmt_pending_free(cmd);
9128 }
9129 
9130 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9131 {
9132 	struct mgmt_pending_cmd *cmd = data;
9133 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9134 	int err;
9135 
9136 	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9137 	if (err)
9138 		return err;
9139 
9140 	if (list_empty(&hdev->adv_instances))
9141 		err = hci_disable_advertising_sync(hdev);
9142 
9143 	return err;
9144 }
9145 
/* Handler for MGMT_OP_REMOVE_ADVERTISING: remove advertising instance
 * cp->instance, or all instances when cp->instance is 0.  The actual HCI
 * work happens asynchronously in remove_advertising_sync().
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* A non-zero instance must refer to an existing instance */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Advertising state is in transition while SET_LE is pending */
	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to remove */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9193 
9194 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9195 			     void *data, u16 data_len)
9196 {
9197 	struct mgmt_cp_get_adv_size_info *cp = data;
9198 	struct mgmt_rp_get_adv_size_info rp;
9199 	u32 flags, supported_flags;
9200 
9201 	bt_dev_dbg(hdev, "sock %p", sk);
9202 
9203 	if (!lmp_le_capable(hdev))
9204 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9205 				       MGMT_STATUS_REJECTED);
9206 
9207 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9208 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9209 				       MGMT_STATUS_INVALID_PARAMS);
9210 
9211 	flags = __le32_to_cpu(cp->flags);
9212 
9213 	/* The current implementation only supports a subset of the specified
9214 	 * flags.
9215 	 */
9216 	supported_flags = get_supported_adv_flags(hdev);
9217 	if (flags & ~supported_flags)
9218 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9219 				       MGMT_STATUS_INVALID_PARAMS);
9220 
9221 	rp.instance = cp->instance;
9222 	rp.flags = cp->flags;
9223 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9224 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9225 
9226 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9227 				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9228 }
9229 
/* Dispatch table for MGMT commands, indexed by opcode (entry 0 is unused
 * since opcode 0x0000 does not exist).  Each entry gives the handler, the
 * expected (or minimum, with HCI_MGMT_VAR_LEN) parameter size, and flags:
 * HCI_MGMT_NO_HDEV for commands without a controller index,
 * HCI_MGMT_UNTRUSTED for commands permitted on untrusted sockets,
 * HCI_MGMT_UNCONFIGURED for commands valid on unconfigured controllers,
 * and HCI_MGMT_HDEV_OPTIONAL where the index may be omitted.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send,               MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
};
9363 
9364 void mgmt_index_added(struct hci_dev *hdev)
9365 {
9366 	struct mgmt_ev_ext_index ev;
9367 
9368 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9369 		return;
9370 
9371 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9372 		mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
9373 				 HCI_MGMT_UNCONF_INDEX_EVENTS);
9374 		ev.type = 0x01;
9375 	} else {
9376 		mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9377 				 HCI_MGMT_INDEX_EVENTS);
9378 		ev.type = 0x00;
9379 	}
9380 
9381 	ev.bus = hdev->bus;
9382 
9383 	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9384 			 HCI_MGMT_EXT_INDEX_EVENTS);
9385 }
9386 
9387 void mgmt_index_removed(struct hci_dev *hdev)
9388 {
9389 	struct mgmt_ev_ext_index ev;
9390 	u8 status = MGMT_STATUS_INVALID_INDEX;
9391 
9392 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9393 		return;
9394 
9395 	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9396 
9397 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9398 		mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
9399 				 HCI_MGMT_UNCONF_INDEX_EVENTS);
9400 		ev.type = 0x01;
9401 	} else {
9402 		mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
9403 				 HCI_MGMT_INDEX_EVENTS);
9404 		ev.type = 0x00;
9405 	}
9406 
9407 	ev.bus = hdev->bus;
9408 
9409 	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
9410 			 HCI_MGMT_EXT_INDEX_EVENTS);
9411 
9412 	/* Cancel any remaining timed work */
9413 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
9414 		return;
9415 	cancel_delayed_work_sync(&hdev->discov_off);
9416 	cancel_delayed_work_sync(&hdev->service_cache);
9417 	cancel_delayed_work_sync(&hdev->rpa_expired);
9418 }
9419 
9420 void mgmt_power_on(struct hci_dev *hdev, int err)
9421 {
9422 	struct cmd_lookup match = { NULL, hdev };
9423 
9424 	bt_dev_dbg(hdev, "err %d", err);
9425 
9426 	hci_dev_lock(hdev);
9427 
9428 	if (!err) {
9429 		restart_le_actions(hdev);
9430 		hci_update_passive_scan(hdev);
9431 	}
9432 
9433 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9434 
9435 	new_settings(hdev, match.sk);
9436 
9437 	if (match.sk)
9438 		sock_put(match.sk);
9439 
9440 	hci_dev_unlock(hdev);
9441 }
9442 
9443 void __mgmt_power_off(struct hci_dev *hdev)
9444 {
9445 	struct cmd_lookup match = { NULL, hdev };
9446 	u8 status, zero_cod[] = { 0, 0, 0 };
9447 
9448 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9449 
9450 	/* If the power off is because of hdev unregistration let
9451 	 * use the appropriate INVALID_INDEX status. Otherwise use
9452 	 * NOT_POWERED. We cover both scenarios here since later in
9453 	 * mgmt_index_removed() any hci_conn callbacks will have already
9454 	 * been triggered, potentially causing misleading DISCONNECTED
9455 	 * status responses.
9456 	 */
9457 	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
9458 		status = MGMT_STATUS_INVALID_INDEX;
9459 	else
9460 		status = MGMT_STATUS_NOT_POWERED;
9461 
9462 	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9463 
9464 	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
9465 		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
9466 				   zero_cod, sizeof(zero_cod),
9467 				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9468 		ext_info_changed(hdev, NULL);
9469 	}
9470 
9471 	new_settings(hdev, match.sk);
9472 
9473 	if (match.sk)
9474 		sock_put(match.sk);
9475 }
9476 
9477 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9478 {
9479 	struct mgmt_pending_cmd *cmd;
9480 	u8 status;
9481 
9482 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9483 	if (!cmd)
9484 		return;
9485 
9486 	if (err == -ERFKILL)
9487 		status = MGMT_STATUS_RFKILLED;
9488 	else
9489 		status = MGMT_STATUS_FAILED;
9490 
9491 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9492 
9493 	mgmt_pending_remove(cmd);
9494 }
9495 
9496 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
9497 		       bool persistent)
9498 {
9499 	struct mgmt_ev_new_link_key ev;
9500 
9501 	memset(&ev, 0, sizeof(ev));
9502 
9503 	ev.store_hint = persistent;
9504 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9505 	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
9506 	ev.key.type = key->type;
9507 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
9508 	ev.key.pin_len = key->pin_len;
9509 
9510 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
9511 }
9512 
9513 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9514 {
9515 	switch (ltk->type) {
9516 	case SMP_LTK:
9517 	case SMP_LTK_RESPONDER:
9518 		if (ltk->authenticated)
9519 			return MGMT_LTK_AUTHENTICATED;
9520 		return MGMT_LTK_UNAUTHENTICATED;
9521 	case SMP_LTK_P256:
9522 		if (ltk->authenticated)
9523 			return MGMT_LTK_P256_AUTH;
9524 		return MGMT_LTK_P256_UNAUTH;
9525 	case SMP_LTK_P256_DEBUG:
9526 		return MGMT_LTK_P256_DEBUG;
9527 	}
9528 
9529 	return MGMT_LTK_UNAUTHENTICATED;
9530 }
9531 
9532 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
9533 {
9534 	struct mgmt_ev_new_long_term_key ev;
9535 
9536 	memset(&ev, 0, sizeof(ev));
9537 
9538 	/* Devices using resolvable or non-resolvable random addresses
9539 	 * without providing an identity resolving key don't require
9540 	 * to store long term keys. Their addresses will change the
9541 	 * next time around.
9542 	 *
9543 	 * Only when a remote device provides an identity address
9544 	 * make sure the long term key is stored. If the remote
9545 	 * identity is known, the long term keys are internally
9546 	 * mapped to the identity address. So allow static random
9547 	 * and public addresses here.
9548 	 */
9549 	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9550 	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
9551 		ev.store_hint = 0x00;
9552 	else
9553 		ev.store_hint = persistent;
9554 
9555 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9556 	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
9557 	ev.key.type = mgmt_ltk_type(key);
9558 	ev.key.enc_size = key->enc_size;
9559 	ev.key.ediv = key->ediv;
9560 	ev.key.rand = key->rand;
9561 
9562 	if (key->type == SMP_LTK)
9563 		ev.key.initiator = 1;
9564 
9565 	/* Make sure we copy only the significant bytes based on the
9566 	 * encryption key size, and set the rest of the value to zeroes.
9567 	 */
9568 	memcpy(ev.key.val, key->val, key->enc_size);
9569 	memset(ev.key.val + key->enc_size, 0,
9570 	       sizeof(ev.key.val) - key->enc_size);
9571 
9572 	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
9573 }
9574 
9575 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9576 {
9577 	struct mgmt_ev_new_irk ev;
9578 
9579 	memset(&ev, 0, sizeof(ev));
9580 
9581 	ev.store_hint = persistent;
9582 
9583 	bacpy(&ev.rpa, &irk->rpa);
9584 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9585 	ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
9586 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9587 
9588 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9589 }
9590 
9591 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9592 		   bool persistent)
9593 {
9594 	struct mgmt_ev_new_csrk ev;
9595 
9596 	memset(&ev, 0, sizeof(ev));
9597 
9598 	/* Devices using resolvable or non-resolvable random addresses
9599 	 * without providing an identity resolving key don't require
9600 	 * to store signature resolving keys. Their addresses will change
9601 	 * the next time around.
9602 	 *
9603 	 * Only when a remote device provides an identity address
9604 	 * make sure the signature resolving key is stored. So allow
9605 	 * static random and public addresses here.
9606 	 */
9607 	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9608 	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9609 		ev.store_hint = 0x00;
9610 	else
9611 		ev.store_hint = persistent;
9612 
9613 	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9614 	ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
9615 	ev.key.type = csrk->type;
9616 	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9617 
9618 	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
9619 }
9620 
9621 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9622 			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9623 			 u16 max_interval, u16 latency, u16 timeout)
9624 {
9625 	struct mgmt_ev_new_conn_param ev;
9626 
9627 	if (!hci_is_identity_address(bdaddr, bdaddr_type))
9628 		return;
9629 
9630 	memset(&ev, 0, sizeof(ev));
9631 	bacpy(&ev.addr.bdaddr, bdaddr);
9632 	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9633 	ev.store_hint = store_hint;
9634 	ev.min_interval = cpu_to_le16(min_interval);
9635 	ev.max_interval = cpu_to_le16(max_interval);
9636 	ev.latency = cpu_to_le16(latency);
9637 	ev.timeout = cpu_to_le16(timeout);
9638 
9639 	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9640 }
9641 
9642 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9643 			   u8 *name, u8 name_len)
9644 {
9645 	struct sk_buff *skb;
9646 	struct mgmt_ev_device_connected *ev;
9647 	u16 eir_len = 0;
9648 	u32 flags = 0;
9649 
9650 	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
9651 		return;
9652 
9653 	/* allocate buff for LE or BR/EDR adv */
9654 	if (conn->le_adv_data_len > 0)
9655 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9656 				     sizeof(*ev) + conn->le_adv_data_len);
9657 	else
9658 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9659 				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
9660 				     eir_precalc_len(sizeof(conn->dev_class)));
9661 
9662 	ev = skb_put(skb, sizeof(*ev));
9663 	bacpy(&ev->addr.bdaddr, &conn->dst);
9664 	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9665 
9666 	if (conn->out)
9667 		flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9668 
9669 	ev->flags = __cpu_to_le32(flags);
9670 
9671 	/* We must ensure that the EIR Data fields are ordered and
9672 	 * unique. Keep it simple for now and avoid the problem by not
9673 	 * adding any BR/EDR data to the LE adv.
9674 	 */
9675 	if (conn->le_adv_data_len > 0) {
9676 		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9677 		eir_len = conn->le_adv_data_len;
9678 	} else {
9679 		if (name)
9680 			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9681 
9682 		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
9683 			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
9684 						    conn->dev_class, sizeof(conn->dev_class));
9685 	}
9686 
9687 	ev->eir_len = cpu_to_le16(eir_len);
9688 
9689 	mgmt_event_skb(skb, NULL);
9690 }
9691 
9692 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9693 {
9694 	struct sock **sk = data;
9695 
9696 	cmd->cmd_complete(cmd, 0);
9697 
9698 	*sk = cmd->sk;
9699 	sock_hold(*sk);
9700 
9701 	mgmt_pending_remove(cmd);
9702 }
9703 
/* mgmt_pending_foreach() callback: announce that the device addressed
 * by the pending Unpair Device command is now unpaired (skipping the
 * issuing socket), then complete and remove the command.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
9714 
9715 bool mgmt_powering_down(struct hci_dev *hdev)
9716 {
9717 	struct mgmt_pending_cmd *cmd;
9718 	struct mgmt_mode *cp;
9719 
9720 	if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
9721 		return true;
9722 
9723 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9724 	if (!cmd)
9725 		return false;
9726 
9727 	cp = cmd->param;
9728 	if (!cp->val)
9729 		return true;
9730 
9731 	return false;
9732 }
9733 
9734 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
9735 			      u8 link_type, u8 addr_type, u8 reason,
9736 			      bool mgmt_connected)
9737 {
9738 	struct mgmt_ev_device_disconnected ev;
9739 	struct sock *sk = NULL;
9740 
9741 	if (!mgmt_connected)
9742 		return;
9743 
9744 	if (link_type != ACL_LINK && link_type != LE_LINK)
9745 		return;
9746 
9747 	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
9748 
9749 	bacpy(&ev.addr.bdaddr, bdaddr);
9750 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9751 	ev.reason = reason;
9752 
9753 	/* Report disconnects due to suspend */
9754 	if (hdev->suspended)
9755 		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
9756 
9757 	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
9758 
9759 	if (sk)
9760 		sock_put(sk);
9761 
9762 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9763 			     hdev);
9764 }
9765 
9766 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9767 			    u8 link_type, u8 addr_type, u8 status)
9768 {
9769 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9770 	struct mgmt_cp_disconnect *cp;
9771 	struct mgmt_pending_cmd *cmd;
9772 
9773 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9774 			     hdev);
9775 
9776 	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9777 	if (!cmd)
9778 		return;
9779 
9780 	cp = cmd->param;
9781 
9782 	if (bacmp(bdaddr, &cp->addr.bdaddr))
9783 		return;
9784 
9785 	if (cp->addr.type != bdaddr_type)
9786 		return;
9787 
9788 	cmd->cmd_complete(cmd, mgmt_status(status));
9789 	mgmt_pending_remove(cmd);
9790 }
9791 
9792 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9793 			 u8 addr_type, u8 status)
9794 {
9795 	struct mgmt_ev_connect_failed ev;
9796 
9797 	bacpy(&ev.addr.bdaddr, bdaddr);
9798 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9799 	ev.status = mgmt_status(status);
9800 
9801 	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
9802 }
9803 
9804 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9805 {
9806 	struct mgmt_ev_pin_code_request ev;
9807 
9808 	bacpy(&ev.addr.bdaddr, bdaddr);
9809 	ev.addr.type = BDADDR_BREDR;
9810 	ev.secure = secure;
9811 
9812 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9813 }
9814 
9815 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9816 				  u8 status)
9817 {
9818 	struct mgmt_pending_cmd *cmd;
9819 
9820 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9821 	if (!cmd)
9822 		return;
9823 
9824 	cmd->cmd_complete(cmd, mgmt_status(status));
9825 	mgmt_pending_remove(cmd);
9826 }
9827 
9828 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9829 				      u8 status)
9830 {
9831 	struct mgmt_pending_cmd *cmd;
9832 
9833 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9834 	if (!cmd)
9835 		return;
9836 
9837 	cmd->cmd_complete(cmd, mgmt_status(status));
9838 	mgmt_pending_remove(cmd);
9839 }
9840 
9841 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9842 			      u8 link_type, u8 addr_type, u32 value,
9843 			      u8 confirm_hint)
9844 {
9845 	struct mgmt_ev_user_confirm_request ev;
9846 
9847 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9848 
9849 	bacpy(&ev.addr.bdaddr, bdaddr);
9850 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9851 	ev.confirm_hint = confirm_hint;
9852 	ev.value = cpu_to_le32(value);
9853 
9854 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9855 			  NULL);
9856 }
9857 
9858 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9859 			      u8 link_type, u8 addr_type)
9860 {
9861 	struct mgmt_ev_user_passkey_request ev;
9862 
9863 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9864 
9865 	bacpy(&ev.addr.bdaddr, bdaddr);
9866 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9867 
9868 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9869 			  NULL);
9870 }
9871 
9872 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9873 				      u8 link_type, u8 addr_type, u8 status,
9874 				      u8 opcode)
9875 {
9876 	struct mgmt_pending_cmd *cmd;
9877 
9878 	cmd = pending_find(opcode, hdev);
9879 	if (!cmd)
9880 		return -ENOENT;
9881 
9882 	cmd->cmd_complete(cmd, mgmt_status(status));
9883 	mgmt_pending_remove(cmd);
9884 
9885 	return 0;
9886 }
9887 
/* Complete a pending User Confirm Reply command with @status. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
9894 
/* Complete a pending User Confirm Negative Reply command with @status. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
9902 
/* Complete a pending User Passkey Reply command with @status. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
9909 
/* Complete a pending User Passkey Negative Reply command with @status. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
9917 
9918 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9919 			     u8 link_type, u8 addr_type, u32 passkey,
9920 			     u8 entered)
9921 {
9922 	struct mgmt_ev_passkey_notify ev;
9923 
9924 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9925 
9926 	bacpy(&ev.addr.bdaddr, bdaddr);
9927 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9928 	ev.passkey = __cpu_to_le32(passkey);
9929 	ev.entered = entered;
9930 
9931 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9932 }
9933 
9934 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
9935 {
9936 	struct mgmt_ev_auth_failed ev;
9937 	struct mgmt_pending_cmd *cmd;
9938 	u8 status = mgmt_status(hci_status);
9939 
9940 	bacpy(&ev.addr.bdaddr, &conn->dst);
9941 	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9942 	ev.status = status;
9943 
9944 	cmd = find_pairing(conn);
9945 
9946 	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
9947 		    cmd ? cmd->sk : NULL);
9948 
9949 	if (cmd) {
9950 		cmd->cmd_complete(cmd, status);
9951 		mgmt_pending_remove(cmd);
9952 	}
9953 }
9954 
9955 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
9956 {
9957 	struct cmd_lookup match = { NULL, hdev };
9958 	bool changed;
9959 
9960 	if (status) {
9961 		u8 mgmt_err = mgmt_status(status);
9962 		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
9963 				     cmd_status_rsp, &mgmt_err);
9964 		return;
9965 	}
9966 
9967 	if (test_bit(HCI_AUTH, &hdev->flags))
9968 		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
9969 	else
9970 		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
9971 
9972 	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
9973 			     &match);
9974 
9975 	if (changed)
9976 		new_settings(hdev, match.sk);
9977 
9978 	if (match.sk)
9979 		sock_put(match.sk);
9980 }
9981 
9982 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9983 {
9984 	struct cmd_lookup *match = data;
9985 
9986 	if (match->sk == NULL) {
9987 		match->sk = cmd->sk;
9988 		sock_hold(match->sk);
9989 	}
9990 }
9991 
9992 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
9993 				    u8 status)
9994 {
9995 	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
9996 
9997 	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
9998 	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
9999 	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
10000 
10001 	if (!status) {
10002 		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
10003 				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
10004 		ext_info_changed(hdev, NULL);
10005 	}
10006 
10007 	if (match.sk)
10008 		sock_put(match.sk);
10009 }
10010 
/* Report a completed local name update via MGMT_EV_LOCAL_NAME_CHANGED.
 * When no Set Local Name command is pending the change came from HCI
 * directly, so the stored name is updated and events are suppressed
 * during power transitions.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* Suppress name events while the device is powering down
		 * or a Set Powered command is still pending (i.e. the
		 * name write is part of a power transition).
		 */
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
			return;

		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
10041 
10042 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
10043 {
10044 	int i;
10045 
10046 	for (i = 0; i < uuid_count; i++) {
10047 		if (!memcmp(uuid, uuids[i], 16))
10048 			return true;
10049 	}
10050 
10051 	return false;
10052 }
10053 
/* Return true if any UUID advertised in the EIR data @eir matches one
 * of the @uuid_count 128-bit UUIDs in @uuids. 16-bit and 32-bit UUIDs
 * are expanded into the Bluetooth base UUID before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero-length field terminates the EIR data. */
		if (field_len == 0)
			break;

		/* Stop on a truncated field. */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* 16-bit UUIDs replace bytes 12-13 of the base. */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 32-bit UUIDs replace bytes 12-15 of the base. */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* 128-bit UUIDs are compared as-is. */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field: length octet plus payload. */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
10108 
/* Decide whether a discovery result passes the active service
 * discovery filters (RSSI threshold and UUID list).
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped. A match in either the
		 * advertising data or the scan response suffices.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
10151 
10152 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
10153 				  bdaddr_t *bdaddr, u8 addr_type)
10154 {
10155 	struct mgmt_ev_adv_monitor_device_lost ev;
10156 
10157 	ev.monitor_handle = cpu_to_le16(handle);
10158 	bacpy(&ev.addr.bdaddr, bdaddr);
10159 	ev.addr.type = addr_type;
10160 
10161 	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
10162 		   NULL);
10163 }
10164 
10165 static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
10166 					       struct sk_buff *skb,
10167 					       struct sock *skip_sk,
10168 					       u16 handle)
10169 {
10170 	struct sk_buff *advmon_skb;
10171 	size_t advmon_skb_len;
10172 	__le16 *monitor_handle;
10173 
10174 	if (!skb)
10175 		return;
10176 
10177 	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
10178 			  sizeof(struct mgmt_ev_device_found)) + skb->len;
10179 	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
10180 				    advmon_skb_len);
10181 	if (!advmon_skb)
10182 		return;
10183 
10184 	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
10185 	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
10186 	 * store monitor_handle of the matched monitor.
10187 	 */
10188 	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
10189 	*monitor_handle = cpu_to_le16(handle);
10190 	skb_put_data(advmon_skb, skb->data, skb->len);
10191 
10192 	mgmt_event_skb(advmon_skb, skip_sk);
10193 }
10194 
/* Route a discovery result to DEVICE_FOUND and/or per-monitor
 * ADV_MONITOR_DEVICE_FOUND events and consume @skb either way.
 */
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	hdev->advmon_pend_notify = false;

	/* Notify at most one monitor per device; remember if any device
	 * still awaits its notification.
	 */
	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	/* Either hand the skb to the generic event or free it. */
	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}
10258 
/* Emit MGMT_EV_MESH_DEVICE_FOUND for an LE report, but only if the
 * advertising or scan response data contains one of the AD types the
 * mesh configuration asked for (an empty list accepts everything).
 */
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	/* An unset first entry means no AD type filter is configured. */
	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		/* Walk each EIR field; eir[i] is the length, eir[i + 1]
		 * the AD type. A zero entry ends the requested list.
		 */
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	/* No requested AD type present: drop the report. */
	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}
10324 
/* Forward a discovered device to userspace as MGMT_EV_DEVICE_FOUND,
 * applying the active discovery filters (RSSI, UUID list, limited
 * discoverable). LE reports also feed the mesh handler when mesh is
 * enabled, and delivery is routed through the advertisement monitor
 * logic.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	/* Synthesize a Class of Device EIR field when the data did not
	 * already carry one.
	 */
	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}
10416 
10417 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10418 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
10419 {
10420 	struct sk_buff *skb;
10421 	struct mgmt_ev_device_found *ev;
10422 	u16 eir_len = 0;
10423 	u32 flags = 0;
10424 
10425 	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10426 			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
10427 
10428 	ev = skb_put(skb, sizeof(*ev));
10429 	bacpy(&ev->addr.bdaddr, bdaddr);
10430 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
10431 	ev->rssi = rssi;
10432 
10433 	if (name)
10434 		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
10435 	else
10436 		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
10437 
10438 	ev->eir_len = cpu_to_le16(eir_len);
10439 	ev->flags = cpu_to_le32(flags);
10440 
10441 	mgmt_event_skb(skb, NULL);
10442 }
10443 
10444 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
10445 {
10446 	struct mgmt_ev_discovering ev;
10447 
10448 	bt_dev_dbg(hdev, "discovering %u", discovering);
10449 
10450 	memset(&ev, 0, sizeof(ev));
10451 	ev.type = hdev->discovery.type;
10452 	ev.discovering = discovering;
10453 
10454 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
10455 }
10456 
10457 void mgmt_suspending(struct hci_dev *hdev, u8 state)
10458 {
10459 	struct mgmt_ev_controller_suspend ev;
10460 
10461 	ev.suspend_state = state;
10462 	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
10463 }
10464 
10465 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
10466 		   u8 addr_type)
10467 {
10468 	struct mgmt_ev_controller_resume ev;
10469 
10470 	ev.wake_reason = reason;
10471 	if (bdaddr) {
10472 		bacpy(&ev.addr.bdaddr, bdaddr);
10473 		ev.addr.type = addr_type;
10474 	} else {
10475 		memset(&ev.addr, 0, sizeof(ev.addr));
10476 	}
10477 
10478 	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
10479 }
10480 
/* Registration descriptor for the HCI control channel: binds the mgmt
 * command handler table and per-hdev init hook to HCI_CHANNEL_CONTROL.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
10487 
/* Register the management control channel. Returns 0 on success or a
 * negative errno from hci_mgmt_chan_register().
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
10492 
/* Unregister the management control channel on module teardown. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
10497 
10498 void mgmt_cleanup(struct sock *sk)
10499 {
10500 	struct mgmt_mesh_tx *mesh_tx;
10501 	struct hci_dev *hdev;
10502 
10503 	read_lock(&hci_dev_list_lock);
10504 
10505 	list_for_each_entry(hdev, &hci_dev_list, list) {
10506 		do {
10507 			mesh_tx = mgmt_mesh_next(hdev, sk);
10508 
10509 			if (mesh_tx)
10510 				mesh_send_complete(hdev, mesh_tx, true);
10511 		} while (mesh_tx);
10512 	}
10513 
10514 	read_unlock(&hci_dev_list_lock);
10515 }
10516