xref: /linux/net/bluetooth/mgmt.c (revision 7f71507851fc7764b36a3221839607d3a45c2025)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <linux/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "smp.h"
37 #include "mgmt_util.h"
38 #include "mgmt_config.h"
39 #include "msft.h"
40 #include "eir.h"
41 #include "aosp.h"
42 
/* Version/revision of the MGMT protocol implemented here; reported to
 * userspace via mgmt_fill_version_info() / MGMT_OP_READ_VERSION.
 */
#define MGMT_VERSION	1
#define MGMT_REVISION	23
45 
/* Opcodes reported to trusted (privileged) sockets by read_commands(). */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
	MGMT_OP_HCI_CMD_SYNC,
};
137 
/* Events reported to trusted (privileged) sockets by read_commands(). */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};
184 
/* Read-only subset of opcodes that untrusted (non-privileged) sockets
 * may issue; reported by read_commands() for such sockets.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
197 
/* Events delivered to untrusted (non-privileged) sockets; reported by
 * read_commands() for such sockets.
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
212 
/* Delay before the service cache is flushed back to the controller
 * (see service_cache_off()).
 */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* 16-byte all-zero key value used to detect unset/invalid keys. */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
217 
218 /* HCI to MGMT error code conversion table */
/* Indexed directly by the HCI status code (0x00 upwards); consumed by
 * mgmt_status(), which falls back to MGMT_STATUS_FAILED for codes
 * beyond the end of the table.
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
285 
286 static u8 mgmt_errno_status(int err)
287 {
288 	switch (err) {
289 	case 0:
290 		return MGMT_STATUS_SUCCESS;
291 	case -EPERM:
292 		return MGMT_STATUS_REJECTED;
293 	case -EINVAL:
294 		return MGMT_STATUS_INVALID_PARAMS;
295 	case -EOPNOTSUPP:
296 		return MGMT_STATUS_NOT_SUPPORTED;
297 	case -EBUSY:
298 		return MGMT_STATUS_BUSY;
299 	case -ETIMEDOUT:
300 		return MGMT_STATUS_AUTH_FAILED;
301 	case -ENOMEM:
302 		return MGMT_STATUS_NO_RESOURCES;
303 	case -EISCONN:
304 		return MGMT_STATUS_ALREADY_CONNECTED;
305 	case -ENOTCONN:
306 		return MGMT_STATUS_DISCONNECTED;
307 	}
308 
309 	return MGMT_STATUS_FAILED;
310 }
311 
312 static u8 mgmt_status(int err)
313 {
314 	if (err < 0)
315 		return mgmt_errno_status(err);
316 
317 	if (err < ARRAY_SIZE(mgmt_status_table))
318 		return mgmt_status_table[err];
319 
320 	return MGMT_STATUS_FAILED;
321 }
322 
/* Send an index-related event on the control channel to every socket
 * whose flags match @flag; no socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
329 
/* Send an event on the control channel only to sockets matching @flag,
 * optionally skipping @skip_sk (typically the command originator).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
336 
/* Send an event on the control channel to all trusted sockets,
 * optionally skipping @skip_sk (typically the command originator).
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
343 
/* Like mgmt_event() but for a pre-built skb; delivered to trusted
 * sockets on the control channel, skipping @skip_sk.
 */
static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}
349 
350 static u8 le_addr_type(u8 mgmt_addr_type)
351 {
352 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
353 		return ADDR_LE_DEV_PUBLIC;
354 	else
355 		return ADDR_LE_DEV_RANDOM;
356 }
357 
/* Fill a mgmt_rp_read_version reply with the compile-time protocol
 * version/revision.  @ver must point to a buffer large enough for
 * struct mgmt_rp_read_version.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
365 
/* MGMT_OP_READ_VERSION handler: reply with the management protocol
 * version and revision.  @hdev/@data/@data_len are unused; the command
 * is index-independent (MGMT_INDEX_NONE).
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
378 
379 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
380 			 u16 data_len)
381 {
382 	struct mgmt_rp_read_commands *rp;
383 	u16 num_commands, num_events;
384 	size_t rp_size;
385 	int i, err;
386 
387 	bt_dev_dbg(hdev, "sock %p", sk);
388 
389 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
390 		num_commands = ARRAY_SIZE(mgmt_commands);
391 		num_events = ARRAY_SIZE(mgmt_events);
392 	} else {
393 		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
394 		num_events = ARRAY_SIZE(mgmt_untrusted_events);
395 	}
396 
397 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
398 
399 	rp = kmalloc(rp_size, GFP_KERNEL);
400 	if (!rp)
401 		return -ENOMEM;
402 
403 	rp->num_commands = cpu_to_le16(num_commands);
404 	rp->num_events = cpu_to_le16(num_events);
405 
406 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
407 		__le16 *opcode = rp->opcodes;
408 
409 		for (i = 0; i < num_commands; i++, opcode++)
410 			put_unaligned_le16(mgmt_commands[i], opcode);
411 
412 		for (i = 0; i < num_events; i++, opcode++)
413 			put_unaligned_le16(mgmt_events[i], opcode);
414 	} else {
415 		__le16 *opcode = rp->opcodes;
416 
417 		for (i = 0; i < num_commands; i++, opcode++)
418 			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
419 
420 		for (i = 0; i < num_events; i++, opcode++)
421 			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
422 	}
423 
424 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
425 				rp, rp_size);
426 	kfree(rp);
427 
428 	return err;
429 }
430 
/* MGMT_OP_READ_INDEX_LIST handler: reply with the ids of all
 * configured controllers.
 *
 * Two passes over hci_dev_list are done under hci_dev_list_lock: the
 * first sizes the reply, the second fills it.  The second pass applies
 * extra filters (SETUP/CONFIG/USER_CHANNEL/raw-only), so the first
 * count is only an upper bound; rp_len is recomputed afterwards to
 * match the number of entries actually written.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	/* Each index is a __le16, hence 2 bytes per entry */
	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding the (non-sleeping) read lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
488 
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: like read_index_list() but
 * reports only controllers with HCI_UNCONFIGURED set.  Same two-pass
 * scheme under hci_dev_list_lock; the first count is an upper bound
 * and rp_len is recomputed after the fill pass.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding the (non-sleeping) read lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
546 
/* MGMT_OP_READ_EXT_INDEX_LIST handler: report all controllers
 * (configured and unconfigured) with a per-entry type (0x00 =
 * configured, 0x01 = unconfigured) and bus.
 *
 * Side effect: the first call switches the socket over to extended
 * index events only (see the comment below).
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper-bound count for the allocation */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list)
		count++;

	/* GFP_ATOMIC: allocating while holding the (non-sleeping) read lock */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill entries; filters mean count may shrink */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			rp->entry[count].type = 0x01;
		else
			rp->entry[count].type = 0x00;

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
612 
613 static bool is_configured(struct hci_dev *hdev)
614 {
615 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
616 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
617 		return false;
618 
619 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
620 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
621 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
622 		return false;
623 
624 	return true;
625 }
626 
/* Return the bitmask (little-endian, wire format) of configuration
 * options still missing: external config not yet done and/or no public
 * address set where the quirks require one.  Mirrors the checks in
 * is_configured().
 */
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
642 
/* Broadcast MGMT_EV_NEW_CONFIG_OPTIONS with the current missing-option
 * mask to sockets that opted in to option events, skipping @skip.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
650 
/* Complete @opcode on @sk with the current missing-option mask as the
 * reply payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
658 
/* MGMT_OP_READ_CONFIG_INFO handler: reply with the manufacturer id,
 * the configuration options this controller supports (external config
 * quirk, settable public address) and those currently missing.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* A public address is settable only if the driver provides a hook */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
686 
/* Build the MGMT_PHY_* bitmask of PHYs this controller can support,
 * derived from the LMP feature bits (BR/EDR side) and LE feature bits
 * (LE side).  The nesting mirrors the feature dependencies: EDR slot
 * variants are only reported when the corresponding rate is supported.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* 1M BR with a single slot is mandatory for BR/EDR */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M TX/RX is mandatory for any LE controller */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
738 
/* Build the MGMT_PHY_* bitmask of currently-selected PHYs.
 *
 * BR/EDR: derived from hdev->pkt_type.  Note the inverted logic for
 * the EDR entries — the HCI_2DHx/HCI_3DHx bits apparently mark packet
 * types as *excluded*, so a cleared bit means the PHY is selected
 * (NOTE(review): inferred from the negations below; confirm against
 * the HCI packet-type definitions).
 *
 * LE: derived directly from the default TX/RX PHY preference masks.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
801 
802 static u32 get_configurable_phys(struct hci_dev *hdev)
803 {
804 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
805 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
806 }
807 
808 static u32 get_supported_settings(struct hci_dev *hdev)
809 {
810 	u32 settings = 0;
811 
812 	settings |= MGMT_SETTING_POWERED;
813 	settings |= MGMT_SETTING_BONDABLE;
814 	settings |= MGMT_SETTING_DEBUG_KEYS;
815 	settings |= MGMT_SETTING_CONNECTABLE;
816 	settings |= MGMT_SETTING_DISCOVERABLE;
817 
818 	if (lmp_bredr_capable(hdev)) {
819 		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
820 			settings |= MGMT_SETTING_FAST_CONNECTABLE;
821 		settings |= MGMT_SETTING_BREDR;
822 		settings |= MGMT_SETTING_LINK_SECURITY;
823 
824 		if (lmp_ssp_capable(hdev)) {
825 			settings |= MGMT_SETTING_SSP;
826 		}
827 
828 		if (lmp_sc_capable(hdev))
829 			settings |= MGMT_SETTING_SECURE_CONN;
830 
831 		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
832 			     &hdev->quirks))
833 			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
834 	}
835 
836 	if (lmp_le_capable(hdev)) {
837 		settings |= MGMT_SETTING_LE;
838 		settings |= MGMT_SETTING_SECURE_CONN;
839 		settings |= MGMT_SETTING_PRIVACY;
840 		settings |= MGMT_SETTING_STATIC_ADDRESS;
841 		settings |= MGMT_SETTING_ADVERTISING;
842 	}
843 
844 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
845 	    hdev->set_bdaddr)
846 		settings |= MGMT_SETTING_CONFIGURATION;
847 
848 	if (cis_central_capable(hdev))
849 		settings |= MGMT_SETTING_CIS_CENTRAL;
850 
851 	if (cis_peripheral_capable(hdev))
852 		settings |= MGMT_SETTING_CIS_PERIPHERAL;
853 
854 	settings |= MGMT_SETTING_PHY_CONFIGURATION;
855 
856 	return settings;
857 }
858 
/* Build the MGMT_SETTING_* bitmask reflecting the controller's current
 * state, mapping each HCI dev flag / capability onto its setting bit.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_capable(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_capable(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	return settings;
}
938 
/* Look up a pending MGMT command for @hdev on the control channel. */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
943 
944 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
945 {
946 	struct mgmt_pending_cmd *cmd;
947 
948 	/* If there's a pending mgmt command the flags will not yet have
949 	 * their final values, so check for this first.
950 	 */
951 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
952 	if (cmd) {
953 		struct mgmt_mode *cp = cmd->param;
954 		if (cp->val == 0x01)
955 			return LE_AD_GENERAL;
956 		else if (cp->val == 0x02)
957 			return LE_AD_LIMITED;
958 	} else {
959 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
960 			return LE_AD_LIMITED;
961 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
962 			return LE_AD_GENERAL;
963 	}
964 
965 	return 0;
966 }
967 
/* Return whether the controller is (or is about to become)
 * connectable.  A pending SET_CONNECTABLE command takes precedence
 * over the current flag since the flag has not reached its final
 * value yet.
 */
bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * it's final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}
984 
/* hci_cmd_sync callback: push the cached EIR data and class of device
 * out to the controller.  Always reports success.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
992 
/* Delayed work (see CACHE_TIMEOUT): when the service cache window
 * expires, queue a sync update of EIR and device class.  Does nothing
 * if HCI_SERVICE_CACHE was already cleared (test-and-clear makes this
 * a one-shot).
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}
1003 
1004 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1005 {
1006 	/* The generation of a new RPA and programming it into the
1007 	 * controller happens in the hci_req_enable_advertising()
1008 	 * function.
1009 	 */
1010 	if (ext_adv_capable(hdev))
1011 		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1012 	else
1013 		return hci_enable_advertising_sync(hdev);
1014 }
1015 
/* Delayed work: mark the resolvable private address as expired, and,
 * if advertising is currently on, queue the sync work that restarts
 * advertising (which regenerates the RPA).
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
1030 
1031 static int set_discoverable_sync(struct hci_dev *hdev, void *data);
1032 
/* Delayed work handler for the discoverable timeout: clears the
 * discoverable flags under hdev->lock, pushes the change to the
 * controller via set_discoverable_sync() and emits a New Settings
 * event.
 */
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}
1057 
1058 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1059 
/* Finish a mesh transmission: optionally notify userspace with a
 * Mesh Packet Complete event (identified by the TX handle) and free
 * the pending entry.  @silent suppresses the event.
 */
static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	/* Copy the handle before mgmt_mesh_remove() frees mesh_tx */
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}
1071 
/* hci_cmd_sync callback run when a mesh send window ends: stop
 * advertising, then complete the transmission at the head of the
 * mesh TX queue (if any).
 */
static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	/* Clear the sending flag before disabling advertising so state
	 * is consistent for the mesh_next completion callback.
	 */
	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}
1085 
1086 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1087 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
/* Completion callback for mesh_send_done_sync(): start the next queued
 * mesh transmission, if one exists.  The incoming @err is intentionally
 * overwritten with the result of queueing the next send.
 */
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	/* On queueing failure, complete (and report) the packet now;
	 * otherwise mark a mesh transmission as in flight.
	 */
	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}
1103 
1104 static void mesh_send_done(struct work_struct *work)
1105 {
1106 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1107 					    mesh_send_done.work);
1108 
1109 	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
1110 		return;
1111 
1112 	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
1113 }
1114 
/* One-time management setup for @hdev, performed when the first mgmt
 * command is seen.  Initializes the delayed work items used by the
 * management interface and marks the device as mgmt-controlled.
 * Idempotent: returns early if HCI_MGMT is already set.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}
1136 
/* Handler for MGMT_OP_READ_INFO: fill a Read Info response with the
 * controller's address, version, settings, class of device and names,
 * all snapshotted under hdev->lock.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1166 
1167 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1168 {
1169 	u16 eir_len = 0;
1170 	size_t name_len;
1171 
1172 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1173 		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1174 					  hdev->dev_class, 3);
1175 
1176 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1177 		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1178 					  hdev->appearance);
1179 
1180 	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
1181 	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1182 				  hdev->dev_name, name_len);
1183 
1184 	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
1185 	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1186 				  hdev->short_name, name_len);
1187 
1188 	return eir_len;
1189 }
1190 
1191 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1192 				    void *data, u16 data_len)
1193 {
1194 	char buf[512];
1195 	struct mgmt_rp_read_ext_info *rp = (void *)buf;
1196 	u16 eir_len;
1197 
1198 	bt_dev_dbg(hdev, "sock %p", sk);
1199 
1200 	memset(&buf, 0, sizeof(buf));
1201 
1202 	hci_dev_lock(hdev);
1203 
1204 	bacpy(&rp->bdaddr, &hdev->bdaddr);
1205 
1206 	rp->version = hdev->hci_ver;
1207 	rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1208 
1209 	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1210 	rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1211 
1212 
1213 	eir_len = append_eir_data_to_buf(hdev, rp->eir);
1214 	rp->eir_len = cpu_to_le16(eir_len);
1215 
1216 	hci_dev_unlock(hdev);
1217 
1218 	/* If this command is called at least once, then the events
1219 	 * for class of device and local name changes are disabled
1220 	 * and only the new extended controller information event
1221 	 * is used.
1222 	 */
1223 	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1224 	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1225 	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1226 
1227 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1228 				 sizeof(*rp) + eir_len);
1229 }
1230 
/* Emit an Extended Info Changed event carrying the current EIR data
 * to sockets that opted in via MGMT_OP_READ_EXT_INFO (the
 * HCI_MGMT_EXT_INFO_EVENTS flag), excluding @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1246 
/* Send a command complete for @opcode whose payload is the current
 * settings bitmask (little endian), the common success response for
 * the Set_* commands.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1254 
1255 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1256 {
1257 	struct mgmt_ev_advertising_added ev;
1258 
1259 	ev.instance = instance;
1260 
1261 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1262 }
1263 
1264 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1265 			      u8 instance)
1266 {
1267 	struct mgmt_ev_advertising_removed ev;
1268 
1269 	ev.instance = instance;
1270 
1271 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1272 }
1273 
1274 static void cancel_adv_timeout(struct hci_dev *hdev)
1275 {
1276 	if (hdev->adv_instance_timeout) {
1277 		hdev->adv_instance_timeout = 0;
1278 		cancel_delayed_work(&hdev->adv_instance_expire);
1279 	}
1280 }
1281 
/* This function requires the caller holds hdev->lock.
 *
 * Re-sort every LE connection parameter entry back onto the pending
 * connect/report lists according to its auto_connect policy; entries
 * with other policies are simply unlinked.
 */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}
1306 
/* Broadcast a New Settings event with the current settings bitmask to
 * all sockets with HCI_MGMT_SETTING_EVENTS set, excluding @skip.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1314 
/* Completion callback for set_powered_sync(): report the result to the
 * originating socket and, on a successful power on, restore LE actions
 * and broadcast the settings change.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}
1351 
/* hci_cmd_sync callback performing the actual power state change
 * requested by MGMT_OP_SET_POWERED.
 */
static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return -ECANCELED;

	cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}
1367 
/* Handler for MGMT_OP_SET_POWERED: validate the request, reject it if
 * a power change is already pending or in progress, and queue the
 * asynchronous power state transition.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Reject power off while a power down is already underway */
	if (!cp->val) {
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
					      MGMT_STATUS_BUSY);
			goto failed;
		}
	}

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just return current settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1426 
/* Public wrapper around new_settings() that notifies all sockets
 * (no socket is skipped).
 */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1431 
/* Context passed through mgmt_pending_foreach() callbacks: @sk records
 * the first responded-to socket (held with sock_hold() so callers can
 * use it as the skip socket for follow-up events), @hdev is the device
 * being iterated and @mgmt_status the status to report.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1437 
/* mgmt_pending_foreach() callback: answer @cmd with the current
 * settings, unlink and free it, and remember the first socket seen
 * (with an extra reference) in the cmd_lookup context.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	/* cmd was already unlinked above, so free rather than remove */
	mgmt_pending_free(cmd);
}
1453 
/* mgmt_pending_foreach() callback: fail @cmd with the status pointed
 * to by @data and remove it from the pending list.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1461 
/* mgmt_pending_foreach() callback: complete @cmd via its own
 * cmd_complete handler when it has one, otherwise fall back to a
 * plain status response.  @data is a struct cmd_lookup.
 */
static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	/* dequeue cmd_sync entries using cmd as data as that is about to be
	 * removed/freed.
	 */
	hci_cmd_sync_dequeue(match->hdev, NULL, cmd, NULL);

	if (cmd->cmd_complete) {
		cmd->cmd_complete(cmd, match->mgmt_status);
		mgmt_pending_remove(cmd);

		return;
	}

	/* cmd_status_rsp() only reads the status byte, which is the
	 * first member of struct cmd_lookup... actually it reads *data
	 * as u8 *, so match->sk must be NULL-safe here — NOTE(review):
	 * this relies on mgmt_status being reachable via the callback's
	 * contract; confirm callers pass a cmd_lookup whose layout the
	 * fallback expects.
	 */
	cmd_status_rsp(cmd, data);
}
1480 
/* Generic cmd_complete handler: echo the command's own parameters back
 * as the response payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1486 
/* cmd_complete handler for commands whose parameters start with a
 * struct mgmt_addr_info: respond with just that address portion.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1492 
1493 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1494 {
1495 	if (!lmp_bredr_capable(hdev))
1496 		return MGMT_STATUS_NOT_SUPPORTED;
1497 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1498 		return MGMT_STATUS_REJECTED;
1499 	else
1500 		return MGMT_STATUS_SUCCESS;
1501 }
1502 
1503 static u8 mgmt_le_support(struct hci_dev *hdev)
1504 {
1505 	if (!lmp_le_capable(hdev))
1506 		return MGMT_STATUS_NOT_SUPPORTED;
1507 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1508 		return MGMT_STATUS_REJECTED;
1509 	else
1510 		return MGMT_STATUS_SUCCESS;
1511 }
1512 
/* Completion callback for set_discoverable_sync(): arm the discoverable
 * timeout if one was requested, then report the result and broadcast
 * the settings change.
 */
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the limited flag set optimistically by
		 * set_discoverable().
		 */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}
1547 
/* hci_cmd_sync callback: push the current discoverable state (already
 * recorded in the dev flags) to the controller.
 */
static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}
1554 
/* Handler for MGMT_OP_SET_DISCOVERABLE.  val: 0x00 off, 0x01 general
 * discoverable, 0x02 limited discoverable (which requires a timeout).
 * Updates the flags optimistically and queues the controller update;
 * the timeout is armed in mgmt_set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be honoured while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable only makes sense while connectable */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1687 
/* Completion callback for set_connectable_sync(): report the result to
 * the originating socket and broadcast the settings change on success.
 */
static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
					  int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}
1716 
/* Flag-only connectable update used while the device is powered off:
 * no HCI traffic, just flag changes plus scan-state refresh and a
 * New Settings broadcast when anything changed.  Disabling connectable
 * also clears discoverable, which depends on it.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1745 
/* hci_cmd_sync callback: push the current connectable state (already
 * recorded in the dev flags) to the controller.
 */
static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}
1752 
/* Handler for MGMT_OP_SET_CONNECTABLE: update the connectable (and,
 * when disabling, discoverable) flags and queue the controller update.
 * While powered off only the flags are changed.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only update, no HCI commands needed */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Disabling connectable also tears down discoverable
		 * state and any pending discoverable timeout.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1812 
/* Handler for MGMT_OP_SET_BONDABLE: toggle the HCI_BONDABLE flag.
 * This is purely a host-side setting, so no HCI command is queued;
 * only a discoverable refresh and a settings broadcast on change.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* test-and-set/clear reports whether the flag actually changed */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1850 
/* Handler for MGMT_OP_SET_LINK_SECURITY: enable/disable BR/EDR link
 * level security by sending HCI_OP_WRITE_AUTH_ENABLE.  While powered
 * off only the HCI_LINK_SECURITY flag is toggled.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only update, respond immediately */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: no HCI needed */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Response is sent from the command complete event handler */
	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1919 
/* Completion callback for set_ssp_sync(): reconcile the HCI_SSP_ENABLED
 * flag with the outcome, answer every pending Set SSP command and
 * broadcast the settings change if the flag flipped.
 */
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED || cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* Roll back the flag set optimistically in set_ssp_sync()
		 * and let listeners know about the reverted setting.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}
1961 
/* hci_cmd_sync callback: write the requested SSP mode to the
 * controller.  The HCI_SSP_ENABLED flag is set optimistically before
 * the write and rolled back here on failure.
 */
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	/* Undo the optimistic flag change if the write failed */
	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
1979 
/* Handler for MGMT_OP_SET_SSP: enable/disable Secure Simple Pairing.
 * Requires BR/EDR and an SSP-capable controller.  While powered off
 * only the HCI_SSP_ENABLED flag is toggled.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only update, respond immediately */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just return current settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2054 
2055 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2056 {
2057 	bt_dev_dbg(hdev, "sock %p", sk);
2058 
2059 	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2060 				       MGMT_STATUS_NOT_SUPPORTED);
2061 }
2062 
/* Completion callback for set_le_sync(): on failure, fail every pending
 * Set LE command; on success answer them all with the current settings
 * and broadcast the change.
 */
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
							&status);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp() took a reference on the first socket it saw */
	if (match.sk)
		sock_put(match.sk);
}
2083 
/* hci_cmd_sync callback for Set LE: tear down advertising when
 * disabling, write the LE host support setting, and refresh default
 * advertising data and passive scanning when LE remains enabled.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		/* Remove all advertising state before turning LE off */
		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
2127 
/* Completion callback for MGMT_OP_SET_MESH_RECEIVER. */
static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	u8 status = mgmt_status(err);
	/* Save the socket: cmd is detached/invalidated before the final
	 * response is sent below.
	 */
	struct sock *sk = cmd->sk;

	if (status) {
		/* Answer (and clean up) every pending SET_MESH_RECEIVER
		 * command with the error status; cmd is among them.
		 */
		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	mgmt_pending_remove(cmd);
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
}
2143 
2144 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2145 {
2146 	struct mgmt_pending_cmd *cmd = data;
2147 	struct mgmt_cp_set_mesh *cp = cmd->param;
2148 	size_t len = cmd->param_len;
2149 
2150 	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2151 
2152 	if (cp->enable)
2153 		hci_dev_set_flag(hdev, HCI_MESH);
2154 	else
2155 		hci_dev_clear_flag(hdev, HCI_MESH);
2156 
2157 	len -= sizeof(*cp);
2158 
2159 	/* If filters don't fit, forward all adv pkts */
2160 	if (len <= sizeof(hdev->mesh_ad_types))
2161 		memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2162 
2163 	hci_update_passive_scan_sync(hdev);
2164 	return 0;
2165 }
2166 
2167 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2168 {
2169 	struct mgmt_cp_set_mesh *cp = data;
2170 	struct mgmt_pending_cmd *cmd;
2171 	int err = 0;
2172 
2173 	bt_dev_dbg(hdev, "sock %p", sk);
2174 
2175 	if (!lmp_le_capable(hdev) ||
2176 	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2177 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2178 				       MGMT_STATUS_NOT_SUPPORTED);
2179 
2180 	if (cp->enable != 0x00 && cp->enable != 0x01)
2181 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2182 				       MGMT_STATUS_INVALID_PARAMS);
2183 
2184 	hci_dev_lock(hdev);
2185 
2186 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2187 	if (!cmd)
2188 		err = -ENOMEM;
2189 	else
2190 		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2191 					 set_mesh_complete);
2192 
2193 	if (err < 0) {
2194 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2195 				      MGMT_STATUS_FAILED);
2196 
2197 		if (cmd)
2198 			mgmt_pending_remove(cmd);
2199 	}
2200 
2201 	hci_dev_unlock(hdev);
2202 	return err;
2203 }
2204 
/* Completion callback for mesh_send_sync(): on failure tear down the
 * transmission, on success schedule the send-done work that will finish
 * it after the requested number of advertising events.
 */
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	unsigned long mesh_send_interval;
	u8 mgmt_err = mgmt_status(err);

	/* Report any errors here, but don't report completion */

	if (mgmt_err) {
		hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
		/* Send Complete Error Code for handle */
		mesh_send_complete(hdev, mesh_tx, false);
		return;
	}

	/* 25 ms per requested advertising event (send->cnt) before the
	 * send-done work runs.
	 */
	mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
	queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
			   mesh_send_interval);
}
2225 
/* hci_cmd_sync work for MGMT_OP_MESH_SEND: create a one-shot advertising
 * instance carrying the mesh packet and schedule it if possible.
 */
static int mesh_send_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	struct adv_info *adv, *next_instance;
	/* Mesh uses a dedicated instance number one past the controller's
	 * regular advertising sets.
	 */
	u8 instance = hdev->le_num_of_adv_sets + 1;
	u16 timeout, duration;
	int err = 0;

	/* NOTE(review): this returns a positive mgmt status, unlike the
	 * negative errno returned below on failure — confirm callers of
	 * the sync work treat both consistently.
	 */
	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
		return MGMT_STATUS_BUSY;

	timeout = 1000;
	/* Long enough for send->cnt advertising events at max interval */
	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
	adv = hci_add_adv_instance(hdev, instance, 0,
				   send->adv_data_len, send->adv_data,
				   0, NULL,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval,
				   mesh_tx->handle);

	if (!IS_ERR(adv))
		mesh_tx->instance = instance;
	else
		err = PTR_ERR(adv);

	if (hdev->cur_adv_instance == instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, instance);
		if (next_instance)
			instance = next_instance->instance;
		else
			instance = 0;
	} else if (hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other, or
		 * let it go naturally from queue if ADV is already happening
		 */
		instance = 0;
	}

	/* instance == 0 means nothing needs explicit scheduling here */
	if (instance)
		return hci_schedule_adv_instance_sync(hdev, instance, true);

	return err;
}
2279 
2280 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2281 {
2282 	struct mgmt_rp_mesh_read_features *rp = data;
2283 
2284 	if (rp->used_handles >= rp->max_handles)
2285 		return;
2286 
2287 	rp->handles[rp->used_handles++] = mesh_tx->handle;
2288 }
2289 
/* MGMT_OP_MESH_READ_FEATURES handler: report the maximum number of mesh
 * TX handles and the handles currently in use by the requesting socket.
 */
static int mesh_features(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_rp_mesh_read_features rp;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	memset(&rp, 0, sizeof(rp));
	rp.index = cpu_to_le16(hdev->id);
	/* Handles are only available while LE is enabled */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		rp.max_handles = MESH_HANDLES_MAX;

	hci_dev_lock(hdev);

	if (rp.max_handles)
		mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	/* Trim the reply to only the used portion of the (one byte per
	 * entry) handles array: sizeof(rp) minus the unused tail.
	 */
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);

	hci_dev_unlock(hdev);
	return 0;
}
2316 
2317 static int send_cancel(struct hci_dev *hdev, void *data)
2318 {
2319 	struct mgmt_pending_cmd *cmd = data;
2320 	struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2321 	struct mgmt_mesh_tx *mesh_tx;
2322 
2323 	if (!cancel->handle) {
2324 		do {
2325 			mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2326 
2327 			if (mesh_tx)
2328 				mesh_send_complete(hdev, mesh_tx, false);
2329 		} while (mesh_tx);
2330 	} else {
2331 		mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2332 
2333 		if (mesh_tx && mesh_tx->sk == cmd->sk)
2334 			mesh_send_complete(hdev, mesh_tx, false);
2335 	}
2336 
2337 	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2338 			  0, NULL, 0);
2339 	mgmt_pending_free(cmd);
2340 
2341 	return 0;
2342 }
2343 
2344 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2345 			    void *data, u16 len)
2346 {
2347 	struct mgmt_pending_cmd *cmd;
2348 	int err;
2349 
2350 	if (!lmp_le_capable(hdev) ||
2351 	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2352 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2353 				       MGMT_STATUS_NOT_SUPPORTED);
2354 
2355 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2356 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2357 				       MGMT_STATUS_REJECTED);
2358 
2359 	hci_dev_lock(hdev);
2360 	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2361 	if (!cmd)
2362 		err = -ENOMEM;
2363 	else
2364 		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2365 
2366 	if (err < 0) {
2367 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2368 				      MGMT_STATUS_FAILED);
2369 
2370 		if (cmd)
2371 			mgmt_pending_free(cmd);
2372 	}
2373 
2374 	hci_dev_unlock(hdev);
2375 	return err;
2376 }
2377 
/* MGMT_OP_MESH_SEND handler: queue a mesh packet for transmission via a
 * temporary advertising instance. Replies immediately with the assigned
 * handle; completion is reported later by the send-done machinery.
 */
static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct mgmt_cp_mesh_send *send = data;
	struct mgmt_rp_mesh_read_features rp;
	bool sending;
	int err = 0;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_NOT_SUPPORTED);
	/* Payload must contain at least one byte of advertising data but
	 * no more than the 31-byte legacy advertising limit.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
	    len <= MGMT_MESH_SEND_SIZE ||
	    len > (MGMT_MESH_SEND_SIZE + 31))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* Reuse the Read Features reply structure just to count how many
	 * handles this socket already has outstanding.
	 */
	memset(&rp, 0, sizeof(rp));
	rp.max_handles = MESH_HANDLES_MAX;

	mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	if (rp.max_handles <= rp.used_handles) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_BUSY);
		goto done;
	}

	/* If a transmission is already in flight, only enqueue; the
	 * send-done work will pick this one up later.
	 */
	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
	mesh_tx = mgmt_mesh_add(sk, hdev, send, len);

	if (!mesh_tx)
		err = -ENOMEM;
	else if (!sending)
		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
					 mesh_send_start_complete);

	if (err < 0) {
		bt_dev_err(hdev, "Send Mesh Failed %d", err);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_FAILED);

		/* NOTE(review): when !sending and hci_cmd_sync_queue()
		 * failed, mesh_tx is left in the list — confirm whether
		 * the condition below should be "if (!sending)" instead.
		 */
		if (mesh_tx) {
			if (sending)
				mgmt_mesh_remove(mesh_tx);
		}
	} else {
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);

		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
				  &mesh_tx->handle, 1);
	}

done:
	hci_dev_unlock(hdev);
	return err;
}
2438 
/* MGMT_OP_SET_LE handler: enable or disable LE host support.
 *
 * When the adapter is powered down, or the requested state already
 * matches the host LE capability, only the flags are updated; otherwise
 * set_le_sync() is queued to reconfigure the controller.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* No controller interaction needed: just reconcile the flags and
	 * respond.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Advertising cannot stay on with LE disabled */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one LE/advertising state change may be in flight */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2527 
/* hci_cmd_sync work for MGMT_OP_HCI_CMD_SYNC: send the raw HCI command
 * carried in cmd->param and relay the controller's reply (or the error)
 * back to the requesting socket. Always consumes cmd.
 */
static int send_hci_cmd_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_hci_cmd_sync *cp = cmd->param;
	struct sk_buff *skb;

	/* NOTE(review): cp->params_len is taken from the command payload;
	 * the mgmt handler must guarantee it matches the buffer actually
	 * received, otherwise this reads past cp->params — confirm the
	 * validation in mgmt_hci_cmd_sync().
	 */
	skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cp->opcode),
				le16_to_cpu(cp->params_len), cp->params,
				cp->event, cp->timeout ?
				msecs_to_jiffies(cp->timeout * 1000) :
				HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
				mgmt_status(PTR_ERR(skb)));
		goto done;
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC, 0,
			  skb->data, skb->len);

	kfree_skb(skb);

done:
	mgmt_pending_free(cmd);

	return 0;
}
2555 
2556 static int mgmt_hci_cmd_sync(struct sock *sk, struct hci_dev *hdev,
2557 			     void *data, u16 len)
2558 {
2559 	struct mgmt_cp_hci_cmd_sync *cp = data;
2560 	struct mgmt_pending_cmd *cmd;
2561 	int err;
2562 
2563 	if (len < sizeof(*cp))
2564 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2565 				       MGMT_STATUS_INVALID_PARAMS);
2566 
2567 	hci_dev_lock(hdev);
2568 	cmd = mgmt_pending_new(sk, MGMT_OP_HCI_CMD_SYNC, hdev, data, len);
2569 	if (!cmd)
2570 		err = -ENOMEM;
2571 	else
2572 		err = hci_cmd_sync_queue(hdev, send_hci_cmd_sync, cmd, NULL);
2573 
2574 	if (err < 0) {
2575 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2576 				      MGMT_STATUS_FAILED);
2577 
2578 		if (cmd)
2579 			mgmt_pending_free(cmd);
2580 	}
2581 
2582 	hci_dev_unlock(hdev);
2583 	return err;
2584 }
2585 
2586 /* This is a helper function to test for pending mgmt commands that can
2587  * cause CoD or EIR HCI commands. We can only allow one such pending
2588  * mgmt command at a time since otherwise we cannot easily track what
2589  * the current values are, will be, and based on that calculate if a new
2590  * HCI command needs to be sent and if yes with what value.
2591  */
2592 static bool pending_eir_or_class(struct hci_dev *hdev)
2593 {
2594 	struct mgmt_pending_cmd *cmd;
2595 
2596 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2597 		switch (cmd->opcode) {
2598 		case MGMT_OP_ADD_UUID:
2599 		case MGMT_OP_REMOVE_UUID:
2600 		case MGMT_OP_SET_DEV_CLASS:
2601 		case MGMT_OP_SET_POWERED:
2602 			return true;
2603 		}
2604 	}
2605 
2606 	return false;
2607 }
2608 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; the first 12 bytes identify a UUID derived
 * from a 16- or 32-bit short form.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

/* Return the effective size (16, 32 or 128 bits) of a 128-bit UUID in
 * little-endian byte order, based on whether it is built on the
 * Bluetooth Base UUID and on the magnitude of its short-form value.
 */
static u8 get_uuid_size(const u8 *uuid)
{
	u32 val;

	/* Not derived from the base UUID: full 128-bit UUID */
	if (memcmp(uuid, bluetooth_base_uuid, 12))
		return 128;

	/* Short-form value lives in the last four bytes */
	val = get_unaligned_le32(&uuid[12]);
	if (val > 0xffff)
		return 32;

	return 16;
}
2627 
2628 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2629 {
2630 	struct mgmt_pending_cmd *cmd = data;
2631 
2632 	bt_dev_dbg(hdev, "err %d", err);
2633 
2634 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2635 			  mgmt_status(err), hdev->dev_class, 3);
2636 
2637 	mgmt_pending_free(cmd);
2638 }
2639 
/* hci_cmd_sync work for MGMT_OP_ADD_UUID: refresh the Class of Device
 * first, then the EIR data.
 */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2650 
/* MGMT_OP_ADD_UUID handler: register a service UUID and queue a
 * CoD/EIR refresh. The response is sent by mgmt_class_complete().
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	/* The UUID stays registered even if queuing the HCI update
	 * below fails.
	 */
	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* MGMT_OP_ADD_UUID don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

	/* Success also falls through here with err == 0 */
failed:
	hci_dev_unlock(hdev);
	return err;
}
2700 
2701 static bool enable_service_cache(struct hci_dev *hdev)
2702 {
2703 	if (!hdev_is_powered(hdev))
2704 		return false;
2705 
2706 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2707 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2708 				   CACHE_TIMEOUT);
2709 		return true;
2710 	}
2711 
2712 	return false;
2713 }
2714 
/* hci_cmd_sync work for MGMT_OP_REMOVE_UUID: refresh the Class of
 * Device first, then the EIR data.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2725 
/* MGMT_OP_REMOVE_UUID handler: remove one service UUID (or all of them
 * when the all-zero UUID is given) and queue a CoD/EIR refresh.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID acts as a wildcard meaning "remove everything" */
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache timer could be armed, the HCI
		 * update is deferred; respond immediately.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_REMOVE_UUID don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2796 
2797 static int set_class_sync(struct hci_dev *hdev, void *data)
2798 {
2799 	int err = 0;
2800 
2801 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2802 		cancel_delayed_work_sync(&hdev->service_cache);
2803 		err = hci_update_eir_sync(hdev);
2804 	}
2805 
2806 	if (err)
2807 		return err;
2808 
2809 	return hci_update_class_sync(hdev);
2810 }
2811 
/* MGMT_OP_SET_DEV_CLASS handler: update the major/minor device class
 * and, on a powered adapter, queue set_class_sync() to write it out.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Low two bits of minor and high three bits of major are reserved */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Powered off: remember the values and respond right away */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_SET_DEV_CLASS don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2866 
/* MGMT_OP_LOAD_LINK_KEYS handler: replace all stored BR/EDR link keys
 * with the supplied list, skipping blocked, malformed and debug keys.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound that keeps the expected_len computation below from
	 * overflowing u16.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* Payload length must exactly match the advertised key count */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	hci_dev_lock(hdev);

	/* This is a full replacement: drop everything stored so far */
	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Skip keys the administrator has explicitly blocked */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Link keys are a BR/EDR concept only */
		if (key->addr.type != BDADDR_BREDR) {
			bt_dev_warn(hdev,
				    "Invalid link address type %u for %pMR",
				    key->addr.type, &key->addr.bdaddr);
			continue;
		}

		/* 0x00-0x08 are the defined link key types */
		if (key->type > 0x08) {
			bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
				    key->type, &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2959 
2960 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2961 			   u8 addr_type, struct sock *skip_sk)
2962 {
2963 	struct mgmt_ev_device_unpaired ev;
2964 
2965 	bacpy(&ev.addr.bdaddr, bdaddr);
2966 	ev.addr.type = addr_type;
2967 
2968 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2969 			  skip_sk);
2970 }
2971 
/* Completion callback for MGMT_OP_UNPAIR_DEVICE: broadcast the unpair
 * event on success, then respond and release the pending command.
 */
static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	if (!err)
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	/* addr_cmd_complete, assigned in unpair_device() */
	cmd->cmd_complete(cmd, err);
	mgmt_pending_free(cmd);
}
2983 
2984 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2985 {
2986 	struct mgmt_pending_cmd *cmd = data;
2987 	struct mgmt_cp_unpair_device *cp = cmd->param;
2988 	struct hci_conn *conn;
2989 
2990 	if (cp->addr.type == BDADDR_BREDR)
2991 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2992 					       &cp->addr.bdaddr);
2993 	else
2994 		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2995 					       le_addr_type(cp->addr.type));
2996 
2997 	if (!conn)
2998 		return 0;
2999 
3000 	/* Disregard any possible error since the likes of hci_abort_conn_sync
3001 	 * will clean up the connection no matter the error.
3002 	 */
3003 	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3004 
3005 	return 0;
3006 }
3007 
/* MGMT_OP_UNPAIR_DEVICE handler: remove the keys for a device and,
 * when requested and connected, disconnect it via unpair_device_sync().
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* Not connected: drop the parameters right away */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
				 unpair_device_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3136 
/* Completion callback for MGMT_OP_DISCONNECT: respond with the mgmt
 * status and release the pending command.
 */
static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* generic_cmd_complete, assigned in disconnect() */
	cmd->cmd_complete(cmd, mgmt_status(err));
	mgmt_pending_free(cmd);
}
3144 
3145 static int disconnect_sync(struct hci_dev *hdev, void *data)
3146 {
3147 	struct mgmt_pending_cmd *cmd = data;
3148 	struct mgmt_cp_disconnect *cp = cmd->param;
3149 	struct hci_conn *conn;
3150 
3151 	if (cp->addr.type == BDADDR_BREDR)
3152 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3153 					       &cp->addr.bdaddr);
3154 	else
3155 		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3156 					       le_addr_type(cp->addr.type));
3157 
3158 	if (!conn)
3159 		return -ENOTCONN;
3160 
3161 	/* Disregard any possible error since the likes of hci_abort_conn_sync
3162 	 * will clean up the connection no matter the error.
3163 	 */
3164 	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3165 
3166 	return 0;
3167 }
3168 
/* Handle MGMT_OP_DISCONNECT: queue termination of the connection to the
 * given address on the hci_sync queue.  The reply is deferred to
 * disconnect_complete() once disconnect_sync() has run.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The reply always echoes the address the request was for */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* mgmt_pending_new() (as opposed to mgmt_pending_add()) keeps the
	 * command off the pending list; it is owned by the queued request
	 * and freed by disconnect_complete().
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
				 disconnect_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3214 
3215 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3216 {
3217 	switch (link_type) {
3218 	case ISO_LINK:
3219 	case LE_LINK:
3220 		switch (addr_type) {
3221 		case ADDR_LE_DEV_PUBLIC:
3222 			return BDADDR_LE_PUBLIC;
3223 
3224 		default:
3225 			/* Fallback to LE Random address type */
3226 			return BDADDR_LE_RANDOM;
3227 		}
3228 
3229 	default:
3230 		/* Fallback to BR/EDR type */
3231 		return BDADDR_BREDR;
3232 	}
3233 }
3234 
/* Handle MGMT_OP_GET_CONNECTIONS: report the addresses of all connections
 * that have been announced as connected to the management interface.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count matching connections to size the reply buffer */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in addresses.  SCO/eSCO links are filtered out
	 * by not advancing the index, so their slot is overwritten by the
	 * next accepted entry (or excluded by the final count).
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3288 
/* Send HCI PIN Code Negative Reply for @cp->addr and track it as a pending
 * MGMT_OP_PIN_CODE_NEG_REPLY command on behalf of @sk.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	/* Reply to the socket once the HCI command completes */
	cmd->cmd_complete = addr_cmd_complete;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
3309 
/* Handle MGMT_OP_PIN_CODE_REPLY: forward the user-supplied PIN code to the
 * controller for the matching ACL connection.  A PIN shorter than 16 bytes
 * is rejected (with an HCI negative reply) when high security was requested.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* PIN pairing is BR/EDR only, so look up the ACL link */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN; otherwise abort the
	 * pairing towards the controller and fail the mgmt command.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Reply to the socket once the HCI command completes */
	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3371 
3372 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3373 			     u16 len)
3374 {
3375 	struct mgmt_cp_set_io_capability *cp = data;
3376 
3377 	bt_dev_dbg(hdev, "sock %p", sk);
3378 
3379 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3380 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3381 				       MGMT_STATUS_INVALID_PARAMS);
3382 
3383 	hci_dev_lock(hdev);
3384 
3385 	hdev->io_capability = cp->io_capability;
3386 
3387 	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3388 
3389 	hci_dev_unlock(hdev);
3390 
3391 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3392 				 NULL, 0);
3393 }
3394 
3395 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3396 {
3397 	struct hci_dev *hdev = conn->hdev;
3398 	struct mgmt_pending_cmd *cmd;
3399 
3400 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3401 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3402 			continue;
3403 
3404 		if (cmd->user_data != conn)
3405 			continue;
3406 
3407 		return cmd;
3408 	}
3409 
3410 	return NULL;
3411 }
3412 
/* cmd_complete handler for MGMT_OP_PAIR_DEVICE: send the reply, detach the
 * pairing callbacks from the connection and release the references taken
 * when the command was set up.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	/* Drop the usage reference taken when initiating the connection */
	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Matches the hci_conn_get() stored in cmd->user_data */
	hci_conn_put(conn);

	return err;
}
3441 
3442 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3443 {
3444 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3445 	struct mgmt_pending_cmd *cmd;
3446 
3447 	cmd = find_pairing(conn);
3448 	if (cmd) {
3449 		cmd->cmd_complete(cmd, status);
3450 		mgmt_pending_remove(cmd);
3451 	}
3452 }
3453 
3454 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3455 {
3456 	struct mgmt_pending_cmd *cmd;
3457 
3458 	BT_DBG("status %u", status);
3459 
3460 	cmd = find_pairing(conn);
3461 	if (!cmd) {
3462 		BT_DBG("Unable to find a pending command");
3463 		return;
3464 	}
3465 
3466 	cmd->cmd_complete(cmd, mgmt_status(status));
3467 	mgmt_pending_remove(cmd);
3468 }
3469 
/* LE confirmation callback: a zero status only means the link came up, not
 * that pairing finished (that is reported via mgmt_smp_complete()), so only
 * failures resolve the pending command here.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
3488 
/* Handle MGMT_OP_PAIR_DEVICE: initiate a connection to the given BR/EDR or
 * LE address and drive pairing on top of it.  The reply is deferred to
 * pairing_complete() once pairing succeeds, fails or is cancelled.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The reply always echoes the address the request was for */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE,
				       HCI_ACL_CONN_TIMEOUT);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the peripheral preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
		if (!p) {
			err = -EIO;
			goto unlock;
		}

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	if (IS_ERR(conn)) {
		int status;

		/* Map the connect error to a management status code */
		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* Callbacks already installed means another pairing is in progress */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* Reference released by pairing_complete() */
	cmd->user_data = hci_conn_get(conn);

	/* If the link is already up and secure, complete immediately */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3624 
/* Handle MGMT_OP_CANCEL_PAIR_DEVICE: abort the pending pairing for the
 * given address, remove any keys created so far and tear down the link if
 * it was only brought up for the pairing attempt.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The cancel must refer to the same peer the pairing is for */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Resolve the original pair command before replying to the cancel */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3681 
/* Common handler for the user confirmation/passkey (and PIN negative)
 * reply commands.  For LE addresses the response is routed through SMP and
 * answered immediately; for BR/EDR the matching HCI command @hci_op is
 * sent and the reply deferred until it completes.  @passkey is only used
 * when @hci_op is HCI_OP_USER_PASSKEY_REPLY.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses go to SMP; reply to the socket right away */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3752 
/* Handle MGMT_OP_PIN_CODE_NEG_REPLY via the common user-pairing path */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3764 
/* Handle MGMT_OP_USER_CONFIRM_REPLY via the common user-pairing path */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* No variable-length payload is allowed for this command */
	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3780 
/* Handle MGMT_OP_USER_CONFIRM_NEG_REPLY via the common user-pairing path */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3792 
/* Handle MGMT_OP_USER_PASSKEY_REPLY via the common user-pairing path */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3804 
/* Handle MGMT_OP_USER_PASSKEY_NEG_REPLY via the common user-pairing path */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3816 
3817 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3818 {
3819 	struct adv_info *adv_instance;
3820 
3821 	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3822 	if (!adv_instance)
3823 		return 0;
3824 
3825 	/* stop if current instance doesn't need to be changed */
3826 	if (!(adv_instance->flags & flags))
3827 		return 0;
3828 
3829 	cancel_adv_timeout(hdev);
3830 
3831 	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3832 	if (!adv_instance)
3833 		return 0;
3834 
3835 	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3836 
3837 	return 0;
3838 }
3839 
/* hci_sync callback: expire the current advertising instance if it carries
 * the local name, so the new name gets picked up.
 */
static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}
3844 
/* Completion callback for MGMT_OP_SET_LOCAL_NAME: reply to the socket and,
 * on success while advertising, refresh the advertised name.
 */
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Bail out if the request was cancelled or the pending command has
	 * already been replaced/removed.
	 */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		/* Re-advertise with the new name if advertising is active */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}
3870 
/* Runs on the hci_sync queue: push the new local name to the controller
 * (BR/EDR name + EIR) and, when advertising, refresh the scan response.
 */
static int set_name_sync(struct hci_dev *hdev, void *data)
{
	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}
3886 
/* Handle MGMT_OP_SET_LOCAL_NAME: store the new complete and short names.
 * When powered, the controller update is queued and the reply deferred to
 * set_name_complete(); when not powered, only the stored values change and
 * the name-changed event is emitted directly.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* The short name is host-side only, so it can be stored right away */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	/* Store the name now so set_name_sync() pushes the new value */
	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3949 
/* hci_sync callback: expire the current advertising instance if it carries
 * the appearance, so the new value gets picked up.
 */
static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}
3954 
/* Handle MGMT_OP_SET_APPEARANCE: store the GAP appearance value and, if it
 * changed while advertising, queue a refresh of the advertised data.
 */
static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Appearance is an LE (GAP) concept only */
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	appearance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != appearance) {
		hdev->appearance = appearance;

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
					   NULL);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}
3989 
3990 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3991 				 void *data, u16 len)
3992 {
3993 	struct mgmt_rp_get_phy_configuration rp;
3994 
3995 	bt_dev_dbg(hdev, "sock %p", sk);
3996 
3997 	hci_dev_lock(hdev);
3998 
3999 	memset(&rp, 0, sizeof(rp));
4000 
4001 	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
4002 	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
4003 	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
4004 
4005 	hci_dev_unlock(hdev);
4006 
4007 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
4008 				 &rp, sizeof(rp));
4009 }
4010 
4011 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
4012 {
4013 	struct mgmt_ev_phy_configuration_changed ev;
4014 
4015 	memset(&ev, 0, sizeof(ev));
4016 
4017 	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
4018 
4019 	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
4020 			  sizeof(ev), skip);
4021 }
4022 
/* Completion callback for MGMT_OP_SET_PHY_CONFIGURATION: translate the
 * queue error or the HCI command status into a mgmt reply and, on success,
 * broadcast the PHY Configuration Changed event.
 */
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Bail out if the request was cancelled or the pending command has
	 * already been replaced/removed.
	 */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	/* The queue succeeded, so the real status is in the HCI response
	 * skb stored by set_default_phy_sync() (first byte is the status).
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}
4060 
/* Runs on the hci_sync queue: translate the selected mgmt PHY bits into an
 * HCI LE Set Default PHY command and issue it.  The response skb is stored
 * in cmd->skb for set_default_phy_complete() to inspect.
 */
static int set_default_phy_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
	struct hci_cp_le_set_default_phy cp_phy;
	u32 selected_phys = __le32_to_cpu(cp->selected_phys);

	memset(&cp_phy, 0, sizeof(cp_phy));

	/* all_phys bits tell the controller the host has no preference for
	 * the TX (0x01) respectively RX (0x02) direction.
	 */
	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	cmd->skb =  __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
				   sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);

	return 0;
}
4099 
/* Handle MGMT_OP_SET_PHY_CONFIGURATION: apply the BR/EDR part by adjusting
 * hdev->pkt_type and, when the LE part changed too, queue an HCI LE Set
 * Default PHY command (reply deferred to set_default_phy_complete()).
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Selecting a PHY the controller doesn't support is invalid */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Non-configurable (always-on) PHYs must all remain selected */
	unconfigure_phys = supported_phys & ~configurable_phys;

	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Translate the BR/EDR PHY selection into ACL packet types.  The
	 * basic rate slot bits enable packet types, while the EDR bits are
	 * inverted: setting them *disallows* the corresponding 2M/3M types.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If only the BR/EDR part changed, no HCI command is needed;
	 * reply directly and broadcast the change if any.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4228 
/* Handle MGMT_OP_SET_BLOCKED_KEYS: replace the device's blocked-key list
 * with the one supplied by userspace.
 */
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	/* Note: err holds a MGMT_STATUS_* code here, not a negative errno */
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	/* Upper bound that keeps the total payload within a u16 length */
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the declared key count */
	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* The new list fully replaces any previously blocked keys */
	hci_blocked_keys_clear(hdev);

	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		/* On allocation failure, keys added so far remain in place */
		if (!b) {
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				err, NULL, 0);
}
4277 
/* Handle MGMT_OP_SET_WIDEBAND_SPEECH: toggle the wideband speech setting.
 * Only allowed while powered off if the value would actually change, since
 * the setting is applied during controller initialization.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only offered when the driver declares wideband speech support */
	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Changing the value while powered is rejected */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	/* Notify other mgmt sockets only if the setting actually changed */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4326 
/* MGMT_OP_READ_CONTROLLER_CAP handler: build and return the controller's
 * security capability TLVs (security flags, max encryption key sizes and,
 * when available, the LE TX power range).
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	/* buf is sized to hold the fixed response header plus every
	 * capability TLV that can be appended below.
	 */
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
4393 
/* Experimental feature UUIDs.  The byte arrays are stored in little-endian
 * order, i.e. the reverse of the human-readable UUID string given in each
 * comment, matching the wire format used by the management interface.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};

/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
static const u8 mgmt_mesh_uuid[16] = {
	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
};
4437 
/* MGMT_OP_READ_EXP_FEATURES_INFO handler: list the experimental features
 * available on the given controller (or on the non-controller index when
 * hdev is NULL) together with their enabled state in BIT(0) of the flags.
 * The requesting socket is also opted in to future feature-changed events.
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_exp_features_info *rp;
	size_t len;
	u16 idx = 0;
	u32 flags;
	int status;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Enough space for 7 features */
	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
	rp = kzalloc(len, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* Debug feature: only exposed on the non-controller index */
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	/* LE simultaneous central/peripheral roles */
	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	/* LL privacy (RPA resolution); BIT(1) marks that toggling it also
	 * changes the supported settings.
	 */
	if (hdev && ll_privacy_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	/* Quality report, via the AOSP extension or the driver hook */
	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	/* Offload codecs, available when the driver provides a data path */
	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	/* ISO socket support */
	if (IS_ENABLED(CONFIG_BT_LE)) {
		flags = iso_enabled() ? BIT(0) : 0;
		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	/* Experimental mesh support */
	if (hdev && lmp_le_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	/* Each feature entry is 20 bytes: 16 byte uuid + 4 byte flags */
	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				   MGMT_OP_READ_EXP_FEATURES_INFO,
				   0, rp, sizeof(*rp) + (20 * idx));

	kfree(rp);
	return status;
}
4542 
/* Notify opted-in sockets (except @skip) that the experimental LL privacy
 * feature changed.  BIT(0) carries the enabled state; BIT(1) is always set
 * to signal that the supported settings changed along with it.  Also keeps
 * hdev->conn_flags in sync with the new state.
 */
static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
					  struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, rpa_resolution_uuid, 16);
	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));

	/* NOTE(review): hdev->conn_flags is updated here without explicit
	 * locking; confirm whether this needs to be atomic with respect to
	 * concurrent readers/writers of conn_flags.
	 */
	if (enabled && privacy_mode_capable(hdev))
		hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
	else
		hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);

}
4563 
4564 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4565 			       bool enabled, struct sock *skip)
4566 {
4567 	struct mgmt_ev_exp_feature_changed ev;
4568 
4569 	memset(&ev, 0, sizeof(ev));
4570 	memcpy(ev.uuid, uuid, 16);
4571 	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4572 
4573 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4574 				  &ev, sizeof(ev),
4575 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4576 }
4577 
/* Build one entry of the exp_features[] dispatch table: pairs a feature
 * UUID with the handler implementing MGMT_OP_SET_EXP_FEATURE for it.
 */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
4583 
/* The zero key uuid is special. Multiple exp features are set through it:
 * it disables the debug feature (when built in) and, while the controller
 * is powered off, clears the experimental LL privacy flag, emitting a
 * feature-changed event for anything that actually changed.
 */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	/* Reply with the all-zero uuid and no flags set */
	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	/* LL privacy can only be toggled while the controller is off */
	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed;

		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		if (changed)
			exp_feature_changed(hdev, rpa_resolution_uuid, false,
					    sk);
	}

	/* Opt the socket in to future feature-changed events */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
4620 
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Set-handler for debug_uuid: toggle the global bt_dbg debug feature.
 * Only valid on the non-controller index, with a single boolean octet.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Record whether the value actually flips before applying it */
	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* hdev is NULL here, so the event goes out on the global index */
	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
#endif
4667 
/* Set-handler for mgmt_mesh_uuid: toggle HCI_MESH_EXPERIMENTAL on the
 * given controller.  Disabling the experimental feature also clears the
 * active HCI_MESH flag.
 */
static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_MESH_EXPERIMENTAL);
	} else {
		/* Turning the experiment off also disables active mesh */
		hci_dev_clear_flag(hdev, HCI_MESH);
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_MESH_EXPERIMENTAL);
	}

	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);

	return err;
}
4718 
/* Set-handler for rpa_resolution_uuid: toggle the experimental LL privacy
 * (RPA resolution) feature.  Only allowed while the controller is powered
 * off.  BIT(1) in the reply flags signals that the supported settings
 * change along with the feature.
 */
static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}
4783 
/* Set-handler for quality_report_uuid: enable/disable quality reporting
 * through either the driver hook (set_quality_report) or the AOSP vendor
 * extension.  Serialized against other HCI requests via hci_req_sync_lock.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	/* Neither the driver hook nor the AOSP extension is available */
	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		/* Prefer the driver hook over the AOSP extension */
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4857 
/* Set-handler for offload_codecs_uuid: toggle HCI_OFFLOAD_CODECS_ENABLED.
 * Requires driver support, indicated by a non-NULL get_data_path_id hook.
 */
static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
				  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));

	/* Codec offload needs a driver-provided data path */
	if (!hdev->get_data_path_id) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	}

	bt_dev_info(hdev, "offload codecs enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);

	return err;
}
4915 
/* Set-handler for le_simultaneous_roles_uuid: toggle
 * HCI_LE_SIMULTANEOUS_ROLES.  Requires the controller's LE states to allow
 * simultaneous central/peripheral operation.
 */
static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
					  struct mgmt_cp_set_exp_feature *cp,
					  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));

	/* The controller must support the required LE state combinations */
	if (!hci_dev_le_state_simultaneous(hdev)) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
		else
			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
	}

	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);

	return err;
}
4973 
#ifdef CONFIG_BT_LE
/* Set-handler for iso_socket_uuid: register (iso_init) or unregister
 * (iso_exit) the ISO socket protocol.  Only valid on the non-controller
 * index.
 */
static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed = false;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = cp->param[0] ? true : false;
	if (val)
		err = iso_init();
	else
		err = iso_exit();

	/* Only announce a change when init/exit actually succeeded */
	if (!err)
		changed = true;

	memcpy(rp.uuid, iso_socket_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, iso_socket_uuid, val, sk);

	return err;
}
#endif
5024 
/* Dispatch table mapping an experimental-feature UUID to the handler that
 * implements MGMT_OP_SET_EXP_FEATURE for it.  Terminated by a NULL-uuid
 * sentinel entry; walked by set_exp_feature().
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
#ifdef CONFIG_BT_LE
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
#endif

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
5046 
5047 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
5048 			   void *data, u16 data_len)
5049 {
5050 	struct mgmt_cp_set_exp_feature *cp = data;
5051 	size_t i = 0;
5052 
5053 	bt_dev_dbg(hdev, "sock %p", sk);
5054 
5055 	for (i = 0; exp_features[i].uuid; i++) {
5056 		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
5057 			return exp_features[i].set_func(sk, hdev, cp, data_len);
5058 	}
5059 
5060 	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
5061 			       MGMT_OP_SET_EXP_FEATURE,
5062 			       MGMT_STATUS_NOT_SUPPORTED);
5063 }
5064 
5065 static u32 get_params_flags(struct hci_dev *hdev,
5066 			    struct hci_conn_params *params)
5067 {
5068 	u32 flags = hdev->conn_flags;
5069 
5070 	/* Devices using RPAs can only be programmed in the acceptlist if
5071 	 * LL Privacy has been enable otherwise they cannot mark
5072 	 * HCI_CONN_FLAG_REMOTE_WAKEUP.
5073 	 */
5074 	if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
5075 	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
5076 		flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
5077 
5078 	return flags;
5079 }
5080 
/* MGMT_OP_GET_DEVICE_FLAGS handler: look up the stored flags for a BR/EDR
 * accept-list entry or an LE connection-parameters entry and return both
 * the supported and the currently set flag masks.  Unknown devices yield
 * MGMT_STATUS_INVALID_PARAMS.
 */
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags;
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	supported_flags = hdev->conn_flags;

	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		/* BR/EDR devices are tracked on the accept list */
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done;

		current_flags = br_params->flags;
	} else {
		/* LE devices are tracked via their connection parameters */
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));
		if (!params)
			goto done;

		/* LE entries may restrict the supported flags further */
		supported_flags = get_params_flags(hdev, params);
		current_flags = params->flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				&rp, sizeof(rp));
}
5132 
5133 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5134 				 bdaddr_t *bdaddr, u8 bdaddr_type,
5135 				 u32 supported_flags, u32 current_flags)
5136 {
5137 	struct mgmt_ev_device_flags_changed ev;
5138 
5139 	bacpy(&ev.addr.bdaddr, bdaddr);
5140 	ev.addr.type = bdaddr_type;
5141 	ev.supported_flags = cpu_to_le32(supported_flags);
5142 	ev.current_flags = cpu_to_le32(current_flags);
5143 
5144 	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5145 }
5146 
/* MGMT_OP_SET_DEVICE_FLAGS handler: validate the requested flags against
 * the supported mask and store them on the matching BR/EDR accept-list
 * entry or LE connection-parameters entry.  On success, other sockets are
 * notified via device_flags_changed().
 */
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags;
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	/* NOTE(review): hdev->conn_flags is read before hci_dev_lock() is
	 * taken; confirm whether conn_flags can change concurrently and
	 * whether this check needs to move under the lock.
	 */
	supported_flags = hdev->conn_flags;

	/* Reject any flag that is not in the supported mask */
	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto done;
	}

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}

		goto unlock;
	}

	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					le_addr_type(cp->addr.type));
	if (!params) {
		bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
			    &cp->addr.bdaddr, le_addr_type(cp->addr.type));
		goto unlock;
	}

	/* LE entries may restrict the supported flags further; re-validate */
	supported_flags = get_params_flags(hdev, params);

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto unlock;
	}

	WRITE_ONCE(params->flags, current_flags);
	status = MGMT_STATUS_SUCCESS;

	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
	 * has been set.
	 */
	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
		hci_update_passive_scan(hdev);

unlock:
	hci_dev_unlock(hdev);

done:
	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}
5223 
5224 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5225 				   u16 handle)
5226 {
5227 	struct mgmt_ev_adv_monitor_added ev;
5228 
5229 	ev.monitor_handle = cpu_to_le16(handle);
5230 
5231 	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5232 }
5233 
/* Broadcast MGMT_EV_ADV_MONITOR_REMOVED for @handle.  When the removal was
 * triggered by a pending Remove Advertising Monitor command targeting a
 * specific handle, the issuing socket is skipped since it gets a command
 * reply instead.
 */
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk_skip = NULL;
	struct mgmt_cp_remove_adv_monitor *cp;

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cmd) {
		cp = cmd->param;

		/* monitor_handle of 0 means "remove all"; only skip the
		 * issuer for a targeted removal.
		 */
		if (cp->monitor_handle)
			sk_skip = cmd->sk;
	}

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
}
5253 
5254 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5255 				 void *data, u16 len)
5256 {
5257 	struct adv_monitor *monitor = NULL;
5258 	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5259 	int handle, err;
5260 	size_t rp_size = 0;
5261 	__u32 supported = 0;
5262 	__u32 enabled = 0;
5263 	__u16 num_handles = 0;
5264 	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5265 
5266 	BT_DBG("request for %s", hdev->name);
5267 
5268 	hci_dev_lock(hdev);
5269 
5270 	if (msft_monitor_supported(hdev))
5271 		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5272 
5273 	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5274 		handles[num_handles++] = monitor->handle;
5275 
5276 	hci_dev_unlock(hdev);
5277 
5278 	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5279 	rp = kmalloc(rp_size, GFP_KERNEL);
5280 	if (!rp)
5281 		return -ENOMEM;
5282 
5283 	/* All supported features are currently enabled */
5284 	enabled = supported;
5285 
5286 	rp->supported_features = cpu_to_le32(supported);
5287 	rp->enabled_features = cpu_to_le32(enabled);
5288 	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5289 	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5290 	rp->num_handles = cpu_to_le16(num_handles);
5291 	if (num_handles)
5292 		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5293 
5294 	err = mgmt_cmd_complete(sk, hdev->id,
5295 				MGMT_OP_READ_ADV_MONITOR_FEATURES,
5296 				MGMT_STATUS_SUCCESS, rp, rp_size);
5297 
5298 	kfree(rp);
5299 
5300 	return err;
5301 }
5302 
/* hci_sync completion for ADD_ADV_PATTERNS_MONITOR[_RSSI]: report the
 * monitor handle back to userspace and, on success, account for the new
 * monitor and refresh passive scanning.
 */
static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	hci_dev_lock(hdev);

	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		/* Notify other mgmt sockets of the new monitor and start
		 * scanning for it right away.
		 */
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5330 
5331 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5332 {
5333 	struct mgmt_pending_cmd *cmd = data;
5334 	struct adv_monitor *monitor = cmd->user_data;
5335 
5336 	return hci_add_adv_monitor(hdev, monitor);
5337 }
5338 
/* Common tail of ADD_ADV_PATTERNS_MONITOR and ADD_ADV_PATTERNS_MONITOR_RSSI.
 *
 * Takes ownership of @m: on any error path (including a non-zero @status
 * handed in from the caller's parsing stage) the monitor is freed here and
 * a command status is returned. On success the monitor is handed to the
 * hci_sync machinery and released later via the completion callback.
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	hci_dev_lock(hdev);

	/* Caller already failed while building the monitor */
	if (status)
		goto unlock;

	/* Only one monitor or LE-state operation may be in flight at a time */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	/* Monitor ownership moves to the pending command */
	cmd->user_data = m;
	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
				 mgmt_add_adv_patterns_monitor_complete);
	if (err) {
		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
5386 
5387 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5388 				   struct mgmt_adv_rssi_thresholds *rssi)
5389 {
5390 	if (rssi) {
5391 		m->rssi.low_threshold = rssi->low_threshold;
5392 		m->rssi.low_threshold_timeout =
5393 		    __le16_to_cpu(rssi->low_threshold_timeout);
5394 		m->rssi.high_threshold = rssi->high_threshold;
5395 		m->rssi.high_threshold_timeout =
5396 		    __le16_to_cpu(rssi->high_threshold_timeout);
5397 		m->rssi.sampling_period = rssi->sampling_period;
5398 	} else {
5399 		/* Default values. These numbers are the least constricting
5400 		 * parameters for MSFT API to work, so it behaves as if there
5401 		 * are no rssi parameter to consider. May need to be changed
5402 		 * if other API are to be supported.
5403 		 */
5404 		m->rssi.low_threshold = -127;
5405 		m->rssi.low_threshold_timeout = 60;
5406 		m->rssi.high_threshold = -127;
5407 		m->rssi.high_threshold_timeout = 0;
5408 		m->rssi.sampling_period = 0;
5409 	}
5410 }
5411 
5412 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5413 				    struct mgmt_adv_pattern *patterns)
5414 {
5415 	u8 offset = 0, length = 0;
5416 	struct adv_pattern *p = NULL;
5417 	int i;
5418 
5419 	for (i = 0; i < pattern_count; i++) {
5420 		offset = patterns[i].offset;
5421 		length = patterns[i].length;
5422 		if (offset >= HCI_MAX_EXT_AD_LENGTH ||
5423 		    length > HCI_MAX_EXT_AD_LENGTH ||
5424 		    (offset + length) > HCI_MAX_EXT_AD_LENGTH)
5425 			return MGMT_STATUS_INVALID_PARAMS;
5426 
5427 		p = kmalloc(sizeof(*p), GFP_KERNEL);
5428 		if (!p)
5429 			return MGMT_STATUS_NO_RESOURCES;
5430 
5431 		p->ad_type = patterns[i].ad_type;
5432 		p->offset = patterns[i].offset;
5433 		p->length = patterns[i].length;
5434 		memcpy(p->value, patterns[i].value, p->length);
5435 
5436 		INIT_LIST_HEAD(&p->list);
5437 		list_add(&p->list, &m->patterns);
5438 	}
5439 
5440 	return MGMT_STATUS_SUCCESS;
5441 }
5442 
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR handler: validate the variable-length
 * pattern payload, build an adv_monitor with default RSSI parameters and
 * pass it on. __add_adv_patterns_monitor() takes ownership of @m (it frees
 * it when @status signals a failure, and tolerates m == NULL).
 */
static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* At least one pattern must follow the fixed header */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	/* Total length must match the advertised pattern count exactly */
	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	/* NULL selects the least constricting default RSSI parameters */
	parse_adv_monitor_rssi(m, NULL);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
}
5479 
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI handler: like
 * add_adv_patterns_monitor() but the request additionally carries RSSI
 * thresholds which are copied into the monitor before the patterns are
 * parsed. Ownership of @m passes to __add_adv_patterns_monitor().
 */
static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
					 void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* At least one pattern must follow the fixed header */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	/* Total length must match the advertised pattern count exactly */
	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, &cp->rssi);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
}
5516 
/* hci_sync completion for REMOVE_ADV_MONITOR: echo the requested handle
 * back to userspace and refresh passive scanning if the removal worked.
 */
static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
{
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;

	hci_dev_lock(hdev);

	/* Already little-endian in the stored request parameters */
	rp.monitor_handle = cp->monitor_handle;

	if (!status)
		hci_update_passive_scan(hdev);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5539 
5540 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5541 {
5542 	struct mgmt_pending_cmd *cmd = data;
5543 	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5544 	u16 handle = __le16_to_cpu(cp->monitor_handle);
5545 
5546 	if (!handle)
5547 		return hci_remove_all_adv_monitor(hdev);
5548 
5549 	return hci_remove_single_adv_monitor(hdev, handle);
5550 }
5551 
/* MGMT_OP_REMOVE_ADV_MONITOR handler: queue the removal as hci_sync work
 * unless a conflicting monitor/LE-state operation is already pending.
 * The response is sent from mgmt_remove_adv_monitor_complete().
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err, status;

	hci_dev_lock(hdev);

	/* Only one monitor or LE-state operation may be in flight at a time */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
				  mgmt_remove_adv_monitor_complete);

	if (err) {
		mgmt_pending_remove(cmd);

		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
5597 
/* hci_sync completion for READ_LOCAL_OOB_DATA: translate the controller's
 * reply skb into an mgmt response. cmd->skb may be NULL (no reply), an
 * ERR_PTR (transport error) or a valid skb whose first byte is the HCI
 * status; the reply layout depends on whether BR/EDR Secure Connections
 * is enabled (extended P-192+P-256 data vs. P-192 only).
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Derive a status from the skb only if err itself was clean */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		/* Legacy reply: P-192 hash/rand only */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Trim the unused P-256 fields from the response */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended reply: both P-192 and P-256 hash/rand */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
5664 
5665 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5666 {
5667 	struct mgmt_pending_cmd *cmd = data;
5668 
5669 	if (bredr_sc_enabled(hdev))
5670 		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5671 	else
5672 		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5673 
5674 	if (IS_ERR(cmd->skb))
5675 		return PTR_ERR(cmd->skb);
5676 	else
5677 		return 0;
5678 }
5679 
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: requires a powered, SSP-capable
 * adapter; queues the actual controller read as hci_sync work and replies
 * from read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		/* cmd may be NULL when mgmt_pending_new() failed above */
		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5721 
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store out-of-band pairing data for
 * a remote address. Accepts two payload sizes: the legacy form with P-192
 * hash/rand only (BR/EDR addresses only), and the extended form carrying
 * both P-192 and P-256 values, where an all-zero pair disables the
 * corresponding OOB data set.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy payload: P-192 data only */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended payload: P-192 and P-256 data */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		/* Neither recognized payload size */
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5829 
5830 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5831 				  void *data, u16 len)
5832 {
5833 	struct mgmt_cp_remove_remote_oob_data *cp = data;
5834 	u8 status;
5835 	int err;
5836 
5837 	bt_dev_dbg(hdev, "sock %p", sk);
5838 
5839 	if (cp->addr.type != BDADDR_BREDR)
5840 		return mgmt_cmd_complete(sk, hdev->id,
5841 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5842 					 MGMT_STATUS_INVALID_PARAMS,
5843 					 &cp->addr, sizeof(cp->addr));
5844 
5845 	hci_dev_lock(hdev);
5846 
5847 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5848 		hci_remote_oob_data_clear(hdev);
5849 		status = MGMT_STATUS_SUCCESS;
5850 		goto done;
5851 	}
5852 
5853 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5854 	if (err < 0)
5855 		status = MGMT_STATUS_INVALID_PARAMS;
5856 	else
5857 		status = MGMT_STATUS_SUCCESS;
5858 
5859 done:
5860 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5861 				status, &cp->addr, sizeof(cp->addr));
5862 
5863 	hci_dev_unlock(hdev);
5864 	return err;
5865 }
5866 
5867 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5868 {
5869 	struct mgmt_pending_cmd *cmd;
5870 
5871 	bt_dev_dbg(hdev, "status %u", status);
5872 
5873 	hci_dev_lock(hdev);
5874 
5875 	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5876 	if (!cmd)
5877 		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5878 
5879 	if (!cmd)
5880 		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5881 
5882 	if (cmd) {
5883 		cmd->cmd_complete(cmd, mgmt_status(status));
5884 		mgmt_pending_remove(cmd);
5885 	}
5886 
5887 	hci_dev_unlock(hdev);
5888 }
5889 
5890 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5891 				    uint8_t *mgmt_status)
5892 {
5893 	switch (type) {
5894 	case DISCOV_TYPE_LE:
5895 		*mgmt_status = mgmt_le_support(hdev);
5896 		if (*mgmt_status)
5897 			return false;
5898 		break;
5899 	case DISCOV_TYPE_INTERLEAVED:
5900 		*mgmt_status = mgmt_le_support(hdev);
5901 		if (*mgmt_status)
5902 			return false;
5903 		fallthrough;
5904 	case DISCOV_TYPE_BREDR:
5905 		*mgmt_status = mgmt_bredr_support(hdev);
5906 		if (*mgmt_status)
5907 			return false;
5908 		break;
5909 	default:
5910 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5911 		return false;
5912 	}
5913 
5914 	return true;
5915 }
5916 
/* hci_sync completion for the start-discovery family: respond to the
 * originating socket and move the discovery state machine forward.
 * Bails out if the request was cancelled or @cmd is no longer pending.
 */
static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err == -ECANCELED)
		return;

	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
		return;

	/* Response payload is the 1-byte discovery type stored in cmd->param */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
				DISCOVERY_FINDING);
}
5938 
/* hci_sync work callback: kick off the actual discovery procedure */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}
5943 
/* Shared implementation of START_DISCOVERY and START_LIMITED_DISCOVERY:
 * validate power/discovery state and type, configure hdev->discovery and
 * queue the scan as hci_sync work (completion replies to the caller).
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject while another discovery or a periodic inquiry is active */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
6014 
/* MGMT_OP_START_DISCOVERY handler: thin wrapper over the shared path */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
6021 
/* MGMT_OP_START_LIMITED_DISCOVERY handler: thin wrapper over the shared
 * path; the opcode makes start_discovery_internal() set discovery.limited.
 */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
6029 
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery but with
 * an RSSI threshold and an optional list of 128-bit service UUIDs used to
 * filter reported results. Validates the variable-length payload, stores
 * the filter in hdev->discovery and queues the scan as hci_sync work.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject while another discovery or a periodic inquiry is active */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* Bound uuid_count so expected_len below cannot wrap a u16 */
	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* Total length must match the advertised UUID count exactly */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
6141 
6142 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6143 {
6144 	struct mgmt_pending_cmd *cmd;
6145 
6146 	bt_dev_dbg(hdev, "status %u", status);
6147 
6148 	hci_dev_lock(hdev);
6149 
6150 	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6151 	if (cmd) {
6152 		cmd->cmd_complete(cmd, mgmt_status(status));
6153 		mgmt_pending_remove(cmd);
6154 	}
6155 
6156 	hci_dev_unlock(hdev);
6157 }
6158 
/* hci_sync completion for STOP_DISCOVERY: respond to the originating
 * socket and, on success, mark discovery as stopped. Bails out if the
 * request was cancelled or @cmd is no longer the pending command.
 */
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* Response payload is the 1-byte discovery type stored in cmd->param */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
6176 
/* hci_sync work callback: tear down the running discovery procedure */
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_stop_discovery_sync(hdev);
}
6181 
/* MGMT_OP_STOP_DISCOVERY handler: only valid while discovery of the same
 * type is active; queues the stop as hci_sync work and replies from
 * stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the discovery currently running */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6226 
/* MGMT_OP_CONFIRM_NAME handler: userspace tells us whether the name of a
 * discovered device is already known. Unknown names are flagged for
 * resolution during the ongoing discovery; known ones are dropped from
 * the name-resolve list.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only meaningful while a discovery is in progress */
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
6268 
6269 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6270 			u16 len)
6271 {
6272 	struct mgmt_cp_block_device *cp = data;
6273 	u8 status;
6274 	int err;
6275 
6276 	bt_dev_dbg(hdev, "sock %p", sk);
6277 
6278 	if (!bdaddr_type_is_valid(cp->addr.type))
6279 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6280 					 MGMT_STATUS_INVALID_PARAMS,
6281 					 &cp->addr, sizeof(cp->addr));
6282 
6283 	hci_dev_lock(hdev);
6284 
6285 	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6286 				  cp->addr.type);
6287 	if (err < 0) {
6288 		status = MGMT_STATUS_FAILED;
6289 		goto done;
6290 	}
6291 
6292 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6293 		   sk);
6294 	status = MGMT_STATUS_SUCCESS;
6295 
6296 done:
6297 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6298 				&cp->addr, sizeof(cp->addr));
6299 
6300 	hci_dev_unlock(hdev);
6301 
6302 	return err;
6303 }
6304 
6305 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6306 			  u16 len)
6307 {
6308 	struct mgmt_cp_unblock_device *cp = data;
6309 	u8 status;
6310 	int err;
6311 
6312 	bt_dev_dbg(hdev, "sock %p", sk);
6313 
6314 	if (!bdaddr_type_is_valid(cp->addr.type))
6315 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6316 					 MGMT_STATUS_INVALID_PARAMS,
6317 					 &cp->addr, sizeof(cp->addr));
6318 
6319 	hci_dev_lock(hdev);
6320 
6321 	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6322 				  cp->addr.type);
6323 	if (err < 0) {
6324 		status = MGMT_STATUS_INVALID_PARAMS;
6325 		goto done;
6326 	}
6327 
6328 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6329 		   sk);
6330 	status = MGMT_STATUS_SUCCESS;
6331 
6332 done:
6333 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6334 				&cp->addr, sizeof(cp->addr));
6335 
6336 	hci_dev_unlock(hdev);
6337 
6338 	return err;
6339 }
6340 
/* hci_sync work callback: regenerate the EIR so it carries the new DID */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}
6345 
6346 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6347 			 u16 len)
6348 {
6349 	struct mgmt_cp_set_device_id *cp = data;
6350 	int err;
6351 	__u16 source;
6352 
6353 	bt_dev_dbg(hdev, "sock %p", sk);
6354 
6355 	source = __le16_to_cpu(cp->source);
6356 
6357 	if (source > 0x0002)
6358 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6359 				       MGMT_STATUS_INVALID_PARAMS);
6360 
6361 	hci_dev_lock(hdev);
6362 
6363 	hdev->devid_source = source;
6364 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6365 	hdev->devid_product = __le16_to_cpu(cp->product);
6366 	hdev->devid_version = __le16_to_cpu(cp->version);
6367 
6368 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6369 				NULL, 0);
6370 
6371 	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6372 
6373 	hci_dev_unlock(hdev);
6374 
6375 	return err;
6376 }
6377 
/* Log the outcome of re-enabling an advertising instance; errors are not
 * propagated further.
 */
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (err)
		bt_dev_err(hdev, "failed to re-configure advertising %d", err);
	else
		bt_dev_dbg(hdev, "status %d", err);
}
6385 
/* hci_sync completion for SET_ADVERTISING: sync the HCI_ADVERTISING flag
 * with the controller state, answer all pending SET_ADVERTISING commands,
 * emit New Settings, and re-enable instance advertising if the global
 * setting was just turned off while instances exist.
 */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		/* Fail every pending SET_ADVERTISING with the same status */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	/* Mirror the controller's actual LE advertising state */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the last matching socket */
	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		/* No current instance: fall back to the first configured one */
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}
6433 
/* hci_cmd_sync callback for MGMT_OP_SET_ADVERTISING: apply the
 * requested mode (0x00 off, 0x01 on, 0x02 connectable advertising).
 */
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	/* Value 0x02 requests connectable advertising */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}
6467 
/* Handle MGMT_OP_SET_ADVERTISING (0x00 off, 0x01 on, 0x02 connectable).
 *
 * Either toggles the flags directly and replies (when no HCI traffic is
 * needed), or queues set_adv_sync() and finishes the command in
 * set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Reject while advertising is temporarily paused by the core */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_dev_test_flag(hdev, HCI_MESH) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only emit New Settings when something actually changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one advertising/LE state change may be in flight at a time */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6552 
6553 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6554 			      void *data, u16 len)
6555 {
6556 	struct mgmt_cp_set_static_address *cp = data;
6557 	int err;
6558 
6559 	bt_dev_dbg(hdev, "sock %p", sk);
6560 
6561 	if (!lmp_le_capable(hdev))
6562 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6563 				       MGMT_STATUS_NOT_SUPPORTED);
6564 
6565 	if (hdev_is_powered(hdev))
6566 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6567 				       MGMT_STATUS_REJECTED);
6568 
6569 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6570 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6571 			return mgmt_cmd_status(sk, hdev->id,
6572 					       MGMT_OP_SET_STATIC_ADDRESS,
6573 					       MGMT_STATUS_INVALID_PARAMS);
6574 
6575 		/* Two most significant bits shall be set */
6576 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6577 			return mgmt_cmd_status(sk, hdev->id,
6578 					       MGMT_OP_SET_STATIC_ADDRESS,
6579 					       MGMT_STATUS_INVALID_PARAMS);
6580 	}
6581 
6582 	hci_dev_lock(hdev);
6583 
6584 	bacpy(&hdev->static_addr, &cp->bdaddr);
6585 
6586 	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6587 	if (err < 0)
6588 		goto unlock;
6589 
6590 	err = new_settings(hdev, sk);
6591 
6592 unlock:
6593 	hci_dev_unlock(hdev);
6594 	return err;
6595 }
6596 
6597 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6598 			   void *data, u16 len)
6599 {
6600 	struct mgmt_cp_set_scan_params *cp = data;
6601 	__u16 interval, window;
6602 	int err;
6603 
6604 	bt_dev_dbg(hdev, "sock %p", sk);
6605 
6606 	if (!lmp_le_capable(hdev))
6607 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6608 				       MGMT_STATUS_NOT_SUPPORTED);
6609 
6610 	interval = __le16_to_cpu(cp->interval);
6611 
6612 	if (interval < 0x0004 || interval > 0x4000)
6613 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6614 				       MGMT_STATUS_INVALID_PARAMS);
6615 
6616 	window = __le16_to_cpu(cp->window);
6617 
6618 	if (window < 0x0004 || window > 0x4000)
6619 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6620 				       MGMT_STATUS_INVALID_PARAMS);
6621 
6622 	if (window > interval)
6623 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6624 				       MGMT_STATUS_INVALID_PARAMS);
6625 
6626 	hci_dev_lock(hdev);
6627 
6628 	hdev->le_scan_interval = interval;
6629 	hdev->le_scan_window = window;
6630 
6631 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6632 				NULL, 0);
6633 
6634 	/* If background scan is running, restart it so new parameters are
6635 	 * loaded.
6636 	 */
6637 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6638 	    hdev->discovery.state == DISCOVERY_STOPPED)
6639 		hci_update_passive_scan(hdev);
6640 
6641 	hci_dev_unlock(hdev);
6642 
6643 	return err;
6644 }
6645 
6646 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6647 {
6648 	struct mgmt_pending_cmd *cmd = data;
6649 
6650 	bt_dev_dbg(hdev, "err %d", err);
6651 
6652 	if (err) {
6653 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6654 				mgmt_status(err));
6655 	} else {
6656 		struct mgmt_mode *cp = cmd->param;
6657 
6658 		if (cp->val)
6659 			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6660 		else
6661 			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6662 
6663 		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6664 		new_settings(hdev, cmd->sk);
6665 	}
6666 
6667 	mgmt_pending_free(cmd);
6668 }
6669 
/* hci_cmd_sync callback for MGMT_OP_SET_FAST_CONNECTABLE: write the
 * requested mode to the controller.
 */
static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	return hci_write_fast_connectable_sync(hdev, cp->val);
}
6677 
/* Handle MGMT_OP_SET_FAST_CONNECTABLE: toggle the fast connectable
 * page scan parameters. Requires BR/EDR enabled and a controller of at
 * least Bluetooth 1.2. If the setting is unchanged or the controller
 * is powered off only the flag is toggled; otherwise the write is
 * queued and completed in fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change: just acknowledge with the current settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* Powered off: flip the flag only; it is applied on power on */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6733 
6734 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6735 {
6736 	struct mgmt_pending_cmd *cmd = data;
6737 
6738 	bt_dev_dbg(hdev, "err %d", err);
6739 
6740 	if (err) {
6741 		u8 mgmt_err = mgmt_status(err);
6742 
6743 		/* We need to restore the flag if related HCI commands
6744 		 * failed.
6745 		 */
6746 		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6747 
6748 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6749 	} else {
6750 		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6751 		new_settings(hdev, cmd->sk);
6752 	}
6753 
6754 	mgmt_pending_free(cmd);
6755 }
6756 
6757 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6758 {
6759 	int status;
6760 
6761 	status = hci_write_fast_connectable_sync(hdev, false);
6762 
6763 	if (!status)
6764 		status = hci_update_scan_sync(hdev);
6765 
6766 	/* Since only the advertising data flags will change, there
6767 	 * is no need to update the scan response data.
6768 	 */
6769 	if (!status)
6770 		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6771 
6772 	return status;
6773 }
6774 
/* Handle MGMT_OP_SET_BREDR: enable or disable BR/EDR on a dual-mode
 * controller. Disabling while powered on is rejected, and re-enabling
 * is rejected for configurations that would be invalid (static address
 * or secure connections in use - see comment below).
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change: just acknowledge with the current settings */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all BR/EDR-only settings */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6874 
/* hci_cmd_sync completion callback for MGMT_OP_SET_SECURE_CONN: on
 * success translate the requested value (0x00 off, 0x01 on, 0x02
 * SC-only) into the HCI_SC_ENABLED/HCI_SC_ONLY flags and notify.
 */
static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
}
6912 
/* hci_cmd_sync callback for MGMT_OP_SET_SECURE_CONN: write the SC
 * support setting to the controller.
 */
static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	/* Force write of val */
	hci_dev_set_flag(hdev, HCI_SC_ENABLED);

	return hci_write_sc_support_sync(hdev, val);
}
6924 
/* Handle MGMT_OP_SET_SECURE_CONN (0x00 off, 0x01 on, 0x02 SC-only).
 *
 * When no controller write is needed (powered off, no BR/EDR SC
 * support or BR/EDR disabled) the flags are toggled directly;
 * otherwise the write is queued and finished in
 * set_secure_conn_complete().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* BR/EDR secure connections requires SSP to be enabled first */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* No change: just acknowledge with the current settings */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
7005 
7006 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
7007 			  void *data, u16 len)
7008 {
7009 	struct mgmt_mode *cp = data;
7010 	bool changed, use_changed;
7011 	int err;
7012 
7013 	bt_dev_dbg(hdev, "sock %p", sk);
7014 
7015 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
7016 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
7017 				       MGMT_STATUS_INVALID_PARAMS);
7018 
7019 	hci_dev_lock(hdev);
7020 
7021 	if (cp->val)
7022 		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
7023 	else
7024 		changed = hci_dev_test_and_clear_flag(hdev,
7025 						      HCI_KEEP_DEBUG_KEYS);
7026 
7027 	if (cp->val == 0x02)
7028 		use_changed = !hci_dev_test_and_set_flag(hdev,
7029 							 HCI_USE_DEBUG_KEYS);
7030 	else
7031 		use_changed = hci_dev_test_and_clear_flag(hdev,
7032 							  HCI_USE_DEBUG_KEYS);
7033 
7034 	if (hdev_is_powered(hdev) && use_changed &&
7035 	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7036 		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
7037 		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
7038 			     sizeof(mode), &mode);
7039 	}
7040 
7041 	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
7042 	if (err < 0)
7043 		goto unlock;
7044 
7045 	if (changed)
7046 		err = new_settings(hdev, sk);
7047 
7048 unlock:
7049 	hci_dev_unlock(hdev);
7050 	return err;
7051 }
7052 
/* Handle MGMT_OP_SET_PRIVACY (0x00 off, 0x01 on, 0x02 limited
 * privacy): store the local IRK and toggle the privacy related flags.
 * Only allowed while the controller is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Mark the RPA expired so a fresh one gets generated */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		/* Privacy off: wipe the stored IRK */
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7109 
7110 static bool irk_is_valid(struct mgmt_irk_info *irk)
7111 {
7112 	switch (irk->addr.type) {
7113 	case BDADDR_LE_PUBLIC:
7114 		return true;
7115 
7116 	case BDADDR_LE_RANDOM:
7117 		/* Two most significant bits shall be set */
7118 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7119 			return false;
7120 		return true;
7121 	}
7122 
7123 	return false;
7124 }
7125 
/* Handle MGMT_OP_LOAD_IRKS: replace the stored Identity Resolving Keys
 * with the supplied list. The whole list is validated before any
 * existing keys are cleared; blocked keys are skipped with a warning.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound on entries that can fit in a 16-bit length command */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared entry count exactly */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate every entry before touching the existing key store */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		/* Administratively blocked keys are never loaded */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	/* User space handles IRKs, so enable RPA resolving */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7196 
7197 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7198 {
7199 	if (key->initiator != 0x00 && key->initiator != 0x01)
7200 		return false;
7201 
7202 	switch (key->addr.type) {
7203 	case BDADDR_LE_PUBLIC:
7204 		return true;
7205 
7206 	case BDADDR_LE_RANDOM:
7207 		/* Two most significant bits shall be set */
7208 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7209 			return false;
7210 		return true;
7211 	}
7212 
7213 	return false;
7214 }
7215 
/* Handle MGMT_OP_LOAD_LONG_TERM_KEYS: replace the stored LTKs with the
 * supplied list. Invalid, blocked and debug keys are skipped (not
 * rejected); the remaining entries are translated to SMP key types and
 * added.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound on entries that can fit in a 16-bit length command */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared entry count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		/* Administratively blocked keys are never loaded */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		if (!ltk_is_valid(key)) {
			bt_dev_warn(hdev, "Invalid LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* Debug keys are deliberately not loaded: fall
			 * through to default and skip this entry.
			 */
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7308 
7309 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7310 {
7311 	struct mgmt_pending_cmd *cmd = data;
7312 	struct hci_conn *conn = cmd->user_data;
7313 	struct mgmt_cp_get_conn_info *cp = cmd->param;
7314 	struct mgmt_rp_get_conn_info rp;
7315 	u8 status;
7316 
7317 	bt_dev_dbg(hdev, "err %d", err);
7318 
7319 	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7320 
7321 	status = mgmt_status(err);
7322 	if (status == MGMT_STATUS_SUCCESS) {
7323 		rp.rssi = conn->rssi;
7324 		rp.tx_power = conn->tx_power;
7325 		rp.max_tx_power = conn->max_tx_power;
7326 	} else {
7327 		rp.rssi = HCI_RSSI_INVALID;
7328 		rp.tx_power = HCI_TX_POWER_INVALID;
7329 		rp.max_tx_power = HCI_TX_POWER_INVALID;
7330 	}
7331 
7332 	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
7333 			  &rp, sizeof(rp));
7334 
7335 	mgmt_pending_free(cmd);
7336 }
7337 
/* hci_cmd_sync callback for MGMT_OP_GET_CONN_INFO: refresh RSSI and,
 * where needed, TX power values for the connection named in the
 * command parameters.
 */
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16   handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* Stash the connection for get_conn_info_complete() */
	cmd->user_data = conn;
	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}
7375 
/* Handle MGMT_OP_GET_CONN_INFO: report RSSI and TX power for an active
 * connection. Values are served from the hci_conn cache when fresh;
 * otherwise a sync query (get_conn_info_sync) is queued and the reply
 * is sent from get_conn_info_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* BR/EDR addresses map to ACL links, everything else to LE links */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
						 hdev->conn_info_max_age - 1);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd) {
			err = -ENOMEM;
		} else {
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);
		}

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7466 
/* Completion callback for MGMT_OP_GET_CLOCK_INFO.
 *
 * Builds the reply from the cached local clock (hdev->clock) and, when a
 * connection was resolved by get_clock_info_sync(), the piconet clock and
 * accuracy cached on that connection.  Always sends the command complete
 * (with a zeroed payload on error) and frees the pending command.
 */
static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct mgmt_rp_get_clock_info rp;
	struct hci_conn *conn = cmd->user_data;	/* set by get_clock_info_sync(); may be NULL */
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Echo the requested address back even on failure */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (err)
		goto complete;

	rp.local_clock = cpu_to_le32(hdev->clock);

	/* Piconet clock is only meaningful for an address-specific request */
	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
			  sizeof(rp));

	mgmt_pending_free(cmd);
}
7497 
/* hci_cmd_sync worker for MGMT_OP_GET_CLOCK_INFO.
 *
 * Issues HCI Read Clock twice: first with the zeroed command (handle 0,
 * which = 0x00) and then, if the target connection still exists, for the
 * piconet clock of that connection.  The first call's result is
 * intentionally ignored (best effort); the second call's status is what
 * gets reported back via get_clock_info_complete().
 */
static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn;

	/* Zeroed cp: handle 0 and which = 0x00 (local clock per HCI spec —
	 * NOTE(review): confirm against Core Spec Read Clock definition).
	 */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_read_clock_sync(hdev, &hci_cp);

	/* Make sure connection still exists */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* Stash the connection for the completion handler */
	cmd->user_data = conn;
	hci_cp.handle = cpu_to_le16(conn->handle);
	hci_cp.which = 0x01; /* Piconet clock */

	return hci_read_clock_sync(hdev, &hci_cp);
}
7519 
/* Handler for MGMT_OP_GET_CLOCK_INFO.
 *
 * Validates the request (BR/EDR only, adapter powered, connection present
 * when a specific address is given) and queues the actual clock read as a
 * sync command; the reply is sent from get_clock_info_complete().  On any
 * validation failure the reply carries a zeroed payload with the echoed
 * address.
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
								u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information only exists for BR/EDR links */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Non-zero address: the caller wants the piconet clock of a specific
	 * connection, so that connection must exist and be established.
	 * BDADDR_ANY means only the local clock is requested.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	if (err < 0) {
		/* Queueing failed: reply immediately and drop the cmd */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);
	}


unlock:
	hci_dev_unlock(hdev);
	return err;
}
7583 
7584 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7585 {
7586 	struct hci_conn *conn;
7587 
7588 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7589 	if (!conn)
7590 		return false;
7591 
7592 	if (conn->dst_type != type)
7593 		return false;
7594 
7595 	if (conn->state != BT_CONNECTED)
7596 		return false;
7597 
7598 	return true;
7599 }
7600 
/* This function requires the caller holds hdev->lock */
/* Set (or update) the auto-connect policy for a device's connection
 * parameters, creating the params entry if it does not exist yet.
 *
 * A params entry sits on at most one of hdev->pend_le_conns or
 * hdev->pend_le_reports; this function first removes it from any list and
 * then re-adds it to the one matching the new policy.  Returns 0 on
 * success, -EIO if the params entry could not be allocated.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do if the policy is unchanged */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever pending list the entry is currently on */
	hci_pend_le_list_del_init(params);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* An in-progress explicit connect takes priority over
		 * report-only mode.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		else
			hci_pend_le_list_add(params, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connect attempt if not already connected */
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
7645 
7646 static void device_added(struct sock *sk, struct hci_dev *hdev,
7647 			 bdaddr_t *bdaddr, u8 type, u8 action)
7648 {
7649 	struct mgmt_ev_device_added ev;
7650 
7651 	bacpy(&ev.addr.bdaddr, bdaddr);
7652 	ev.addr.type = type;
7653 	ev.action = action;
7654 
7655 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7656 }
7657 
/* hci_cmd_sync worker for Add Device: re-evaluate passive scanning so the
 * newly configured connection parameters take effect.
 */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7662 
/* Handler for MGMT_OP_ADD_DEVICE.
 *
 * For BDADDR_BREDR the device is added to the accept list (only action
 * 0x01 is supported).  For LE address types the action is mapped to an
 * auto-connect policy: 0x02 -> HCI_AUTO_CONN_ALWAYS,
 * 0x01 -> HCI_AUTO_CONN_DIRECT, 0x00 -> HCI_AUTO_CONN_REPORT.
 * On success a Device Added event and a Device Flags Changed event are
 * emitted, and the reply echoes the added address.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Reject invalid address types and the all-zeroes address */
	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	/* Only actions 0x00-0x02 are defined */
	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		/* Accept list changed; re-evaluate page scanning */
		hci_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		/* Pick up the flags of the (possibly pre-existing) entry so
		 * the Device Flags Changed event reports the current state.
		 */
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->flags;
	}

	/* Apply the new params by refreshing passive scanning */
	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	supported_flags = hdev->conn_flags;
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7764 
7765 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7766 			   bdaddr_t *bdaddr, u8 type)
7767 {
7768 	struct mgmt_ev_device_removed ev;
7769 
7770 	bacpy(&ev.addr.bdaddr, bdaddr);
7771 	ev.addr.type = type;
7772 
7773 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7774 }
7775 
/* hci_cmd_sync worker for Remove Device: re-evaluate passive scanning now
 * that connection parameters have been removed.
 */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7780 
/* Handler for MGMT_OP_REMOVE_DEVICE.
 *
 * With a specific address: removes the device from the accept list
 * (BR/EDR) or frees its LE connection parameters.  With BDADDR_ANY (and
 * address type 0): flushes the whole accept list and all non-disabled LE
 * connection parameters.  Device Removed events are emitted for every
 * removed entry.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				/* Not on the accept list */
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			/* Accept list changed; re-evaluate page scanning */
			hci_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries not created via Add Device (disabled/explicit-only)
		 * cannot be removed through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		hci_conn_params_free(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* BDADDR_ANY must be paired with address type 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Flush the whole BR/EDR accept list */
		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_scan(hdev);

		/* Drop every LE params entry except disabled ones; keep
		 * in-progress explicit connects alive, just downgraded.
		 */
		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			hci_conn_params_free(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	/* Re-evaluate passive scanning (BR/EDR path jumps past this) */
	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
7904 
7905 static int conn_update_sync(struct hci_dev *hdev, void *data)
7906 {
7907 	struct hci_conn_params *params = data;
7908 	struct hci_conn *conn;
7909 
7910 	conn = hci_conn_hash_lookup_le(hdev, &params->addr, params->addr_type);
7911 	if (!conn)
7912 		return -ECANCELED;
7913 
7914 	return hci_le_conn_update_sync(hdev, conn, params);
7915 }
7916 
/* Handler for MGMT_OP_LOAD_CONN_PARAM.
 *
 * Replaces the stored LE connection parameters with the supplied list.
 * Individual entries with an invalid address type or out-of-range values
 * are skipped (logged), not rejected.  As a special case, loading a single
 * entry for an already-known device triggers the connection update
 * procedure on a matching established connection where we are central.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound that keeps struct_size() below U16_MAX */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared entry count exactly */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	/* Multi-entry load: clear disabled params up front.  The
	 * single-entry case defers this to the loop below so the
	 * existing-entry check can still find them.
	 */
	if (param_count > 1)
		hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		bool update = false;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		/* Detect when the loading is for an existing parameter then
		 * attempt to trigger the connection update procedure.
		 */
		if (!i && param_count == 1) {
			hci_param = hci_conn_params_lookup(hdev,
							   &param->addr.bdaddr,
							   addr_type);
			if (hci_param)
				update = true;
			else
				hci_conn_params_clear_disabled(hdev);
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;

		/* Check if we need to trigger a connection update */
		if (update) {
			struct hci_conn *conn;

			/* Lookup for existing connection as central and check
			 * if parameters match and if they don't then trigger
			 * a connection update.
			 */
			conn = hci_conn_hash_lookup_le(hdev, &hci_param->addr,
						       addr_type);
			if (conn && conn->role == HCI_ROLE_MASTER &&
			    (conn->le_conn_min_interval != min ||
			     conn->le_conn_max_interval != max ||
			     conn->le_conn_latency != latency ||
			     conn->le_supv_timeout != timeout))
				hci_cmd_sync_queue(hdev, conn_update_sync,
						   hci_param, NULL);
		}
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
8035 
/* Handler for MGMT_OP_SET_EXTERNAL_CONFIG.
 *
 * Toggles the HCI_EXT_CONFIGURED flag on controllers that declare the
 * HCI_QUIRK_EXTERNAL_CONFIG quirk.  Only allowed while powered off.  When
 * the change flips the configured/unconfigured state, the management
 * index is re-registered accordingly (and power-on is scheduled when the
 * device becomes configured).
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the configured state flipped, the device has to move between
	 * the configured and unconfigured index lists.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			/* Became configured: bring it up automatically */
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			/* Became unconfigured: expose as raw/unconfigured */
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
8091 
/* Handler for MGMT_OP_SET_PUBLIC_ADDRESS.
 *
 * Stores the public address to be programmed into a controller that has a
 * set_bdaddr driver callback.  Only allowed while powered off and with a
 * non-zero address.  If the address actually changed and the device is now
 * fully configured, the index is re-registered and power-on is scheduled
 * so the new address takes effect.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Requires driver support for programming the address */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		/* Setting the address completed configuration: move the
		 * index to the configured list and power the device on.
		 */
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
8143 
/* Completion callback for the BR/EDR path of
 * MGMT_OP_READ_LOCAL_OOB_EXT_DATA.
 *
 * Parses the controller's Read Local OOB (Extended) Data reply skb,
 * assembles the hash/randomizer values into EIR format, sends the command
 * complete and - on success - broadcasts a Local OOB Data Updated event to
 * sockets that opted in.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	/* Bail out if the command was cancelled or superseded */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
		return;

	/* Derive a status from the skb when the sync stage itself passed */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		/* Failure: empty EIR, no hash/randomizer values */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		/* Legacy pairing only: P-192 values from Read Local OOB Data */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* class-of-dev (5) + hash (18) + rand (18) */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Secure Connections: P-256, plus P-192 unless SC-only */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Notify other sockets that opted into OOB data events */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
8267 
8268 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8269 				  struct mgmt_cp_read_local_oob_ext_data *cp)
8270 {
8271 	struct mgmt_pending_cmd *cmd;
8272 	int err;
8273 
8274 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8275 			       cp, sizeof(*cp));
8276 	if (!cmd)
8277 		return -ENOMEM;
8278 
8279 	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8280 				 read_local_oob_ext_data_complete);
8281 
8282 	if (err < 0) {
8283 		mgmt_pending_remove(cmd);
8284 		return err;
8285 	}
8286 
8287 	return 0;
8288 }
8289 
/* Handler for MGMT_OP_READ_LOCAL_OOB_EXT_DATA.
 *
 * cp->type selects the transport: BIT(BDADDR_BREDR) for BR/EDR (answered
 * asynchronously via read_local_ssp_oob_req when SSP is enabled) or
 * BIT(BDADDR_LE_PUBLIC)|BIT(BDADDR_LE_RANDOM) for LE (answered inline
 * with address, role, SC confirm/random and flags EIR fields).  On
 * success, a Local OOB Data Updated event is also broadcast to sockets
 * that opted in.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* First pass: determine status and worst-case EIR size so the
	 * reply buffer can be allocated before taking the lock.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;	/* class-of-dev field */
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				/* addr + role + confirm + random + flags */
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: actually build the EIR data */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* Hash/randomizer must come from the controller;
			 * finish asynchronously.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* addr[6] encodes the address type: 0x01 random, 0x00 public */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* Role: 0x02 when advertising (peripheral preferred),
		 * 0x01 otherwise
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
8450 
8451 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8452 {
8453 	u32 flags = 0;
8454 
8455 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
8456 	flags |= MGMT_ADV_FLAG_DISCOV;
8457 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8458 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8459 	flags |= MGMT_ADV_FLAG_APPEARANCE;
8460 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8461 	flags |= MGMT_ADV_PARAM_DURATION;
8462 	flags |= MGMT_ADV_PARAM_TIMEOUT;
8463 	flags |= MGMT_ADV_PARAM_INTERVALS;
8464 	flags |= MGMT_ADV_PARAM_TX_POWER;
8465 	flags |= MGMT_ADV_PARAM_SCAN_RSP;
8466 
8467 	/* In extended adv TX_POWER returned from Set Adv Param
8468 	 * will be always valid.
8469 	 */
8470 	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8471 		flags |= MGMT_ADV_FLAG_TX_POWER;
8472 
8473 	if (ext_adv_capable(hdev)) {
8474 		flags |= MGMT_ADV_FLAG_SEC_1M;
8475 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8476 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8477 
8478 		if (le_2m_capable(hdev))
8479 			flags |= MGMT_ADV_FLAG_SEC_2M;
8480 
8481 		if (le_coded_capable(hdev))
8482 			flags |= MGMT_ADV_FLAG_SEC_CODED;
8483 	}
8484 
8485 	return flags;
8486 }
8487 
/* Handler for MGMT_OP_READ_ADV_FEATURES.
 *
 * Replies with the supported advertising flags, maximum data lengths,
 * instance limits and the list of currently registered instance numbers.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One byte of instance number per registered instance */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = max_adv_len(hdev);
	rp->max_scan_rsp_len = max_adv_len(hdev);
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		/* Only instances 1-le_num_of_adv_sets are externally visible */
		/* NOTE(review): the comparison uses adv_instance_cnt while the
		 * comment above says le_num_of_adv_sets — verify which bound
		 * is intended.
		 */
		if (adv_instance->instance <= hdev->adv_instance_cnt) {
			*instance = adv_instance->instance;
			instance++;
		} else {
			/* Hidden instance: shrink the reply accordingly */
			rp->num_instances--;
			rp_len--;
		}
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
8542 
/* Number of bytes the local name EIR field occupies (length byte +
 * type byte + name), computed by rendering it into a scratch buffer.
 */
static u8 calculate_name_len(struct hci_dev *hdev)
{
	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */

	return eir_append_local_name(hdev, buf, 0);
}
8549 
8550 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8551 			   bool is_adv_data)
8552 {
8553 	u8 max_len = max_adv_len(hdev);
8554 
8555 	if (is_adv_data) {
8556 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8557 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
8558 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
8559 			max_len -= 3;
8560 
8561 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8562 			max_len -= 3;
8563 	} else {
8564 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8565 			max_len -= calculate_name_len(hdev);
8566 
8567 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8568 			max_len -= 4;
8569 	}
8570 
8571 	return max_len;
8572 }
8573 
8574 static bool flags_managed(u32 adv_flags)
8575 {
8576 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8577 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
8578 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
8579 }
8580 
8581 static bool tx_power_managed(u32 adv_flags)
8582 {
8583 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8584 }
8585 
8586 static bool name_managed(u32 adv_flags)
8587 {
8588 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8589 }
8590 
8591 static bool appearance_managed(u32 adv_flags)
8592 {
8593 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8594 }
8595 
8596 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8597 			      u8 len, bool is_adv_data)
8598 {
8599 	int i, cur_len;
8600 	u8 max_len;
8601 
8602 	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8603 
8604 	if (len > max_len)
8605 		return false;
8606 
8607 	/* Make sure that the data is correctly formatted. */
8608 	for (i = 0; i < len; i += (cur_len + 1)) {
8609 		cur_len = data[i];
8610 
8611 		if (!cur_len)
8612 			continue;
8613 
8614 		if (data[i + 1] == EIR_FLAGS &&
8615 		    (!is_adv_data || flags_managed(adv_flags)))
8616 			return false;
8617 
8618 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8619 			return false;
8620 
8621 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8622 			return false;
8623 
8624 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8625 			return false;
8626 
8627 		if (data[i + 1] == EIR_APPEARANCE &&
8628 		    appearance_managed(adv_flags))
8629 			return false;
8630 
8631 		/* If the current field length would exceed the total data
8632 		 * length, then it's invalid.
8633 		 */
8634 		if (i + cur_len >= len)
8635 			return false;
8636 	}
8637 
8638 	return true;
8639 }
8640 
8641 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8642 {
8643 	u32 supported_flags, phy_flags;
8644 
8645 	/* The current implementation only supports a subset of the specified
8646 	 * flags. Also need to check mutual exclusiveness of sec flags.
8647 	 */
8648 	supported_flags = get_supported_adv_flags(hdev);
8649 	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8650 	if (adv_flags & ~supported_flags ||
8651 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8652 		return false;
8653 
8654 	return true;
8655 }
8656 
8657 static bool adv_busy(struct hci_dev *hdev)
8658 {
8659 	return pending_find(MGMT_OP_SET_LE, hdev);
8660 }
8661 
8662 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8663 			     int err)
8664 {
8665 	struct adv_info *adv, *n;
8666 
8667 	bt_dev_dbg(hdev, "err %d", err);
8668 
8669 	hci_dev_lock(hdev);
8670 
8671 	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8672 		u8 instance;
8673 
8674 		if (!adv->pending)
8675 			continue;
8676 
8677 		if (!err) {
8678 			adv->pending = false;
8679 			continue;
8680 		}
8681 
8682 		instance = adv->instance;
8683 
8684 		if (hdev->cur_adv_instance == instance)
8685 			cancel_adv_timeout(hdev);
8686 
8687 		hci_remove_adv_instance(hdev, instance);
8688 		mgmt_advertising_removed(sk, hdev, instance);
8689 	}
8690 
8691 	hci_dev_unlock(hdev);
8692 }
8693 
8694 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8695 {
8696 	struct mgmt_pending_cmd *cmd = data;
8697 	struct mgmt_cp_add_advertising *cp = cmd->param;
8698 	struct mgmt_rp_add_advertising rp;
8699 
8700 	memset(&rp, 0, sizeof(rp));
8701 
8702 	rp.instance = cp->instance;
8703 
8704 	if (err)
8705 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8706 				mgmt_status(err));
8707 	else
8708 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8709 				  mgmt_status(err), &rp, sizeof(rp));
8710 
8711 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8712 
8713 	mgmt_pending_free(cmd);
8714 }
8715 
/* hci_cmd_sync work for MGMT_OP_ADD_ADVERTISING: schedule the requested
 * instance, forcing an immediate (re)start.
 */
static int add_advertising_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_advertising *cp = cmd->param;

	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
}
8723 
/* Handle MGMT_OP_ADD_ADVERTISING: register (or overwrite) an advertising
 * instance with the given flags, data and scan response, and schedule it
 * for transmission when possible.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *adv, *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Valid instance numbers are 1..le_num_of_adv_sets */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The variable-length tail must match the declared lengths */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout can only be enforced while the adapter is powered */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Validate both the advertising data and the scan response TLVs */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	adv = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval, 0);
	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Record the instance that will actually be scheduled so the
	 * completion handler replies for the right one.
	 */
	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8858 
/* Completion callback for Add Extended Advertising Parameters: on
 * success reply with the chosen TX power and the remaining data space;
 * on failure tear the instance down again and report the error.
 */
static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Instance may already be gone; nothing to report then */
	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}
8908 
8909 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8910 {
8911 	struct mgmt_pending_cmd *cmd = data;
8912 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8913 
8914 	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8915 }
8916 
/* Handle MGMT_OP_ADD_EXT_ADV_PARAMS: create an advertising instance
 * with only its parameters set (data comes later via Add Ext Adv Data).
 * Optional parameters fall back to controller defaults when their flag
 * is not set.
 */
static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	/* Valid instance numbers are 1..le_num_of_adv_sets */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined. Any
	 * extra parameters we don't know about will be ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
				   timeout, duration, tx_power, min_interval,
				   max_interval, 0);

	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			/* Roll back the instance created above */
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		/* Legacy advertising: no HCI work needed yet, reply now */
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9032 
9033 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
9034 {
9035 	struct mgmt_pending_cmd *cmd = data;
9036 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
9037 	struct mgmt_rp_add_advertising rp;
9038 
9039 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
9040 
9041 	memset(&rp, 0, sizeof(rp));
9042 
9043 	rp.instance = cp->instance;
9044 
9045 	if (err)
9046 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9047 				mgmt_status(err));
9048 	else
9049 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9050 				  mgmt_status(err), &rp, sizeof(rp));
9051 
9052 	mgmt_pending_free(cmd);
9053 }
9054 
9055 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
9056 {
9057 	struct mgmt_pending_cmd *cmd = data;
9058 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
9059 	int err;
9060 
9061 	if (ext_adv_capable(hdev)) {
9062 		err = hci_update_adv_data_sync(hdev, cp->instance);
9063 		if (err)
9064 			return err;
9065 
9066 		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
9067 		if (err)
9068 			return err;
9069 
9070 		return hci_enable_ext_advertising_sync(hdev, cp->instance);
9071 	}
9072 
9073 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
9074 }
9075 
/* Handle MGMT_OP_ADD_EXT_ADV_DATA: attach advertising and scan response
 * data to an instance previously created via Add Ext Adv Params, then
 * schedule it. On any failure the (new) instance is removed again.
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance must have been created via Add Ext Adv Params */
	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9194 
9195 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9196 					int err)
9197 {
9198 	struct mgmt_pending_cmd *cmd = data;
9199 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9200 	struct mgmt_rp_remove_advertising rp;
9201 
9202 	bt_dev_dbg(hdev, "err %d", err);
9203 
9204 	memset(&rp, 0, sizeof(rp));
9205 	rp.instance = cp->instance;
9206 
9207 	if (err)
9208 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9209 				mgmt_status(err));
9210 	else
9211 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9212 				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9213 
9214 	mgmt_pending_free(cmd);
9215 }
9216 
9217 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9218 {
9219 	struct mgmt_pending_cmd *cmd = data;
9220 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9221 	int err;
9222 
9223 	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9224 	if (err)
9225 		return err;
9226 
9227 	if (list_empty(&hdev->adv_instances))
9228 		err = hci_disable_advertising_sync(hdev);
9229 
9230 	return err;
9231 }
9232 
/* Handle MGMT_OP_REMOVE_ADVERTISING: queue removal of one advertising
 * instance (or all of them when cp->instance is 0).
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* A non-zero instance must refer to an existing one */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Reject while a Set LE command is still in flight */
	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing registered, so nothing to remove */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9280 
9281 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9282 			     void *data, u16 data_len)
9283 {
9284 	struct mgmt_cp_get_adv_size_info *cp = data;
9285 	struct mgmt_rp_get_adv_size_info rp;
9286 	u32 flags, supported_flags;
9287 
9288 	bt_dev_dbg(hdev, "sock %p", sk);
9289 
9290 	if (!lmp_le_capable(hdev))
9291 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9292 				       MGMT_STATUS_REJECTED);
9293 
9294 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9295 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9296 				       MGMT_STATUS_INVALID_PARAMS);
9297 
9298 	flags = __le32_to_cpu(cp->flags);
9299 
9300 	/* The current implementation only supports a subset of the specified
9301 	 * flags.
9302 	 */
9303 	supported_flags = get_supported_adv_flags(hdev);
9304 	if (flags & ~supported_flags)
9305 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9306 				       MGMT_STATUS_INVALID_PARAMS);
9307 
9308 	rp.instance = cp->instance;
9309 	rp.flags = cp->flags;
9310 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9311 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9312 
9313 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9314 				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9315 }
9316 
/* Dispatch table for mgmt commands, indexed by opcode (MGMT_OP_*).
 * Each entry gives the handler, the fixed (or, with HCI_MGMT_VAR_LEN,
 * minimum) parameter size, and optional HCI_MGMT_* flags controlling
 * whether an hdev is required, whether the command is allowed on
 * unconfigured controllers, and whether untrusted sockets may use it.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send,               MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
	{ mgmt_hci_cmd_sync,       MGMT_HCI_CMD_SYNC_SIZE, HCI_MGMT_VAR_LEN },
};
9451 
9452 void mgmt_index_added(struct hci_dev *hdev)
9453 {
9454 	struct mgmt_ev_ext_index ev;
9455 
9456 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9457 		return;
9458 
9459 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9460 		mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
9461 				 HCI_MGMT_UNCONF_INDEX_EVENTS);
9462 		ev.type = 0x01;
9463 	} else {
9464 		mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9465 				 HCI_MGMT_INDEX_EVENTS);
9466 		ev.type = 0x00;
9467 	}
9468 
9469 	ev.bus = hdev->bus;
9470 
9471 	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9472 			 HCI_MGMT_EXT_INDEX_EVENTS);
9473 }
9474 
9475 void mgmt_index_removed(struct hci_dev *hdev)
9476 {
9477 	struct mgmt_ev_ext_index ev;
9478 	struct cmd_lookup match = { NULL, hdev, MGMT_STATUS_INVALID_INDEX };
9479 
9480 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9481 		return;
9482 
9483 	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);
9484 
9485 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9486 		mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
9487 				 HCI_MGMT_UNCONF_INDEX_EVENTS);
9488 		ev.type = 0x01;
9489 	} else {
9490 		mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
9491 				 HCI_MGMT_INDEX_EVENTS);
9492 		ev.type = 0x00;
9493 	}
9494 
9495 	ev.bus = hdev->bus;
9496 
9497 	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
9498 			 HCI_MGMT_EXT_INDEX_EVENTS);
9499 
9500 	/* Cancel any remaining timed work */
9501 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
9502 		return;
9503 	cancel_delayed_work_sync(&hdev->discov_off);
9504 	cancel_delayed_work_sync(&hdev->service_cache);
9505 	cancel_delayed_work_sync(&hdev->rpa_expired);
9506 }
9507 
/* Handle completion of a power-on request.
 *
 * @hdev: controller that finished powering on
 * @err:  0 on success, negative errno on failure
 *
 * On success the stored LE connection actions are restarted and the
 * passive scanning state is re-evaluated. All pending SET_POWERED
 * commands are then answered and a New Settings event is emitted,
 * skipping the socket recorded in match.sk (presumably the command
 * originator's, set by settings_rsp).
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* Drop the reference taken when match.sk was recorded */
	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
9530 
/* Finish powering off a controller (caller holds the required locks).
 *
 * Answers all pending SET_POWERED commands, fails every other pending
 * command with a status matching the cause of the power-off, announces
 * a cleared class of device if one was set, and emits New Settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		match.mgmt_status = MGMT_STATUS_INVALID_INDEX;
	else
		match.mgmt_status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);

	/* If the class of device was non-zero announce that it is now
	 * cleared.
	 */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	/* Drop the reference taken when match.sk was recorded */
	if (match.sk)
		sock_put(match.sk);
}
9564 
9565 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9566 {
9567 	struct mgmt_pending_cmd *cmd;
9568 	u8 status;
9569 
9570 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9571 	if (!cmd)
9572 		return;
9573 
9574 	if (err == -ERFKILL)
9575 		status = MGMT_STATUS_RFKILLED;
9576 	else
9577 		status = MGMT_STATUS_FAILED;
9578 
9579 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9580 
9581 	mgmt_pending_remove(cmd);
9582 }
9583 
9584 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
9585 		       bool persistent)
9586 {
9587 	struct mgmt_ev_new_link_key ev;
9588 
9589 	memset(&ev, 0, sizeof(ev));
9590 
9591 	ev.store_hint = persistent;
9592 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9593 	ev.key.addr.type = BDADDR_BREDR;
9594 	ev.key.type = key->type;
9595 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
9596 	ev.key.pin_len = key->pin_len;
9597 
9598 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
9599 }
9600 
9601 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9602 {
9603 	switch (ltk->type) {
9604 	case SMP_LTK:
9605 	case SMP_LTK_RESPONDER:
9606 		if (ltk->authenticated)
9607 			return MGMT_LTK_AUTHENTICATED;
9608 		return MGMT_LTK_UNAUTHENTICATED;
9609 	case SMP_LTK_P256:
9610 		if (ltk->authenticated)
9611 			return MGMT_LTK_P256_AUTH;
9612 		return MGMT_LTK_P256_UNAUTH;
9613 	case SMP_LTK_P256_DEBUG:
9614 		return MGMT_LTK_P256_DEBUG;
9615 	}
9616 
9617 	return MGMT_LTK_UNAUTHENTICATED;
9618 }
9619 
/* Emit a New Long Term Key event for a key distributed over SMP.
 *
 * @hdev:       controller the key belongs to
 * @key:        the new long term key
 * @persistent: whether userspace should store the key permanently
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK (as opposed to SMP_LTK_RESPONDER) is the initiator's
	 * key.
	 */
	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
9662 
9663 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9664 {
9665 	struct mgmt_ev_new_irk ev;
9666 
9667 	memset(&ev, 0, sizeof(ev));
9668 
9669 	ev.store_hint = persistent;
9670 
9671 	bacpy(&ev.rpa, &irk->rpa);
9672 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9673 	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
9674 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9675 
9676 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9677 }
9678 
/* Emit a New CSRK event for a signature resolving key received over
 * SMP.
 *
 * @hdev:       controller the key belongs to
 * @csrk:       the new signature resolving key
 * @persistent: whether userspace should store the key permanently
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
9708 
9709 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9710 			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9711 			 u16 max_interval, u16 latency, u16 timeout)
9712 {
9713 	struct mgmt_ev_new_conn_param ev;
9714 
9715 	if (!hci_is_identity_address(bdaddr, bdaddr_type))
9716 		return;
9717 
9718 	memset(&ev, 0, sizeof(ev));
9719 	bacpy(&ev.addr.bdaddr, bdaddr);
9720 	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9721 	ev.store_hint = store_hint;
9722 	ev.min_interval = cpu_to_le16(min_interval);
9723 	ev.max_interval = cpu_to_le16(max_interval);
9724 	ev.latency = cpu_to_le16(latency);
9725 	ev.timeout = cpu_to_le16(timeout);
9726 
9727 	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9728 }
9729 
9730 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9731 			   u8 *name, u8 name_len)
9732 {
9733 	struct sk_buff *skb;
9734 	struct mgmt_ev_device_connected *ev;
9735 	u16 eir_len = 0;
9736 	u32 flags = 0;
9737 
9738 	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
9739 		return;
9740 
9741 	/* allocate buff for LE or BR/EDR adv */
9742 	if (conn->le_adv_data_len > 0)
9743 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9744 				     sizeof(*ev) + conn->le_adv_data_len);
9745 	else
9746 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9747 				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
9748 				     eir_precalc_len(sizeof(conn->dev_class)));
9749 
9750 	ev = skb_put(skb, sizeof(*ev));
9751 	bacpy(&ev->addr.bdaddr, &conn->dst);
9752 	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9753 
9754 	if (conn->out)
9755 		flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9756 
9757 	ev->flags = __cpu_to_le32(flags);
9758 
9759 	/* We must ensure that the EIR Data fields are ordered and
9760 	 * unique. Keep it simple for now and avoid the problem by not
9761 	 * adding any BR/EDR data to the LE adv.
9762 	 */
9763 	if (conn->le_adv_data_len > 0) {
9764 		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9765 		eir_len = conn->le_adv_data_len;
9766 	} else {
9767 		if (name)
9768 			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9769 
9770 		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
9771 			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
9772 						    conn->dev_class, sizeof(conn->dev_class));
9773 	}
9774 
9775 	ev->eir_len = cpu_to_le16(eir_len);
9776 
9777 	mgmt_event_skb(skb, NULL);
9778 }
9779 
/* mgmt_pending_foreach() callback: emit Device Unpaired for the
 * command's address and complete the pending UNPAIR_DEVICE command
 * with success.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
9790 
9791 bool mgmt_powering_down(struct hci_dev *hdev)
9792 {
9793 	struct mgmt_pending_cmd *cmd;
9794 	struct mgmt_mode *cp;
9795 
9796 	if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
9797 		return true;
9798 
9799 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9800 	if (!cmd)
9801 		return false;
9802 
9803 	cp = cmd->param;
9804 	if (!cp->val)
9805 		return true;
9806 
9807 	return false;
9808 }
9809 
9810 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
9811 			      u8 link_type, u8 addr_type, u8 reason,
9812 			      bool mgmt_connected)
9813 {
9814 	struct mgmt_ev_device_disconnected ev;
9815 	struct sock *sk = NULL;
9816 
9817 	if (!mgmt_connected)
9818 		return;
9819 
9820 	if (link_type != ACL_LINK && link_type != LE_LINK)
9821 		return;
9822 
9823 	bacpy(&ev.addr.bdaddr, bdaddr);
9824 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9825 	ev.reason = reason;
9826 
9827 	/* Report disconnects due to suspend */
9828 	if (hdev->suspended)
9829 		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
9830 
9831 	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
9832 
9833 	if (sk)
9834 		sock_put(sk);
9835 }
9836 
/* Handle a failed HCI disconnect attempt.
 *
 * Pending UNPAIR_DEVICE commands are answered via unpair_device_rsp,
 * and a pending DISCONNECT command matching the address and type is
 * completed with the translated HCI status.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Only complete the command if it targeted this address/type */
	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9862 
/* Report a failed connection attempt to userspace. If the device had
 * already been announced as connected over mgmt, a Device Disconnected
 * event is emitted instead of Connect Failed.
 */
void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		/* NOTE(review): the raw HCI status is passed through as
		 * the mgmt disconnect reason here (not run through
		 * mgmt_status()) - confirm this mapping is intended.
		 */
		mgmt_device_disconnected(hdev, &conn->dst, conn->type,
					 conn->dst_type, status, true);
		return;
	}

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
9879 
9880 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9881 {
9882 	struct mgmt_ev_pin_code_request ev;
9883 
9884 	bacpy(&ev.addr.bdaddr, bdaddr);
9885 	ev.addr.type = BDADDR_BREDR;
9886 	ev.secure = secure;
9887 
9888 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9889 }
9890 
9891 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9892 				  u8 status)
9893 {
9894 	struct mgmt_pending_cmd *cmd;
9895 
9896 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9897 	if (!cmd)
9898 		return;
9899 
9900 	cmd->cmd_complete(cmd, mgmt_status(status));
9901 	mgmt_pending_remove(cmd);
9902 }
9903 
9904 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9905 				      u8 status)
9906 {
9907 	struct mgmt_pending_cmd *cmd;
9908 
9909 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9910 	if (!cmd)
9911 		return;
9912 
9913 	cmd->cmd_complete(cmd, mgmt_status(status));
9914 	mgmt_pending_remove(cmd);
9915 }
9916 
9917 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9918 			      u8 link_type, u8 addr_type, u32 value,
9919 			      u8 confirm_hint)
9920 {
9921 	struct mgmt_ev_user_confirm_request ev;
9922 
9923 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9924 
9925 	bacpy(&ev.addr.bdaddr, bdaddr);
9926 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9927 	ev.confirm_hint = confirm_hint;
9928 	ev.value = cpu_to_le32(value);
9929 
9930 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9931 			  NULL);
9932 }
9933 
9934 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9935 			      u8 link_type, u8 addr_type)
9936 {
9937 	struct mgmt_ev_user_passkey_request ev;
9938 
9939 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9940 
9941 	bacpy(&ev.addr.bdaddr, bdaddr);
9942 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9943 
9944 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9945 			  NULL);
9946 }
9947 
9948 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9949 				      u8 link_type, u8 addr_type, u8 status,
9950 				      u8 opcode)
9951 {
9952 	struct mgmt_pending_cmd *cmd;
9953 
9954 	cmd = pending_find(opcode, hdev);
9955 	if (!cmd)
9956 		return -ENOENT;
9957 
9958 	cmd->cmd_complete(cmd, mgmt_status(status));
9959 	mgmt_pending_remove(cmd);
9960 
9961 	return 0;
9962 }
9963 
9964 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9965 				     u8 link_type, u8 addr_type, u8 status)
9966 {
9967 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9968 					  status, MGMT_OP_USER_CONFIRM_REPLY);
9969 }
9970 
9971 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9972 					 u8 link_type, u8 addr_type, u8 status)
9973 {
9974 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9975 					  status,
9976 					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
9977 }
9978 
9979 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9980 				     u8 link_type, u8 addr_type, u8 status)
9981 {
9982 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9983 					  status, MGMT_OP_USER_PASSKEY_REPLY);
9984 }
9985 
9986 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9987 					 u8 link_type, u8 addr_type, u8 status)
9988 {
9989 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9990 					  status,
9991 					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
9992 }
9993 
9994 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9995 			     u8 link_type, u8 addr_type, u32 passkey,
9996 			     u8 entered)
9997 {
9998 	struct mgmt_ev_passkey_notify ev;
9999 
10000 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
10001 
10002 	bacpy(&ev.addr.bdaddr, bdaddr);
10003 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
10004 	ev.passkey = __cpu_to_le32(passkey);
10005 	ev.entered = entered;
10006 
10007 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
10008 }
10009 
/* Report an authentication failure on @conn.
 *
 * The Auth Failed event is broadcast, skipping the socket of a pending
 * pairing command for this connection (if any); that command is then
 * completed with the same translated status.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
10030 
/* Handle completion of an authentication-enable change.
 *
 * On failure all pending SET_LINK_SECURITY commands get a command
 * status with the translated error. On success the HCI_LINK_SECURITY
 * flag is synced with the controller's HCI_AUTH state, pending
 * commands are answered with the current settings, and New Settings is
 * emitted only if the flag actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's auth state in the mgmt setting */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* Drop the reference taken when match.sk was recorded */
	if (match.sk)
		sock_put(match.sk);
}
10057 
10058 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
10059 {
10060 	struct cmd_lookup *match = data;
10061 
10062 	if (match->sk == NULL) {
10063 		match->sk = cmd->sk;
10064 		sock_hold(match->sk);
10065 	}
10066 }
10067 
/* Handle completion of a class of device update.
 *
 * sk_lookup records (with a reference) the socket of the first pending
 * SET_DEV_CLASS, ADD_UUID or REMOVE_UUID command; the reference is
 * dropped at the end. On success the new class is broadcast to all
 * sockets subscribed to class of device events.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		/* Class of device is always 3 bytes on the wire */
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
10086 
/* Handle completion of a local name change.
 *
 * If no SET_LOCAL_NAME command is pending the change originated from
 * the kernel itself: the new name is cached, and no events are sent
 * while a power state transition is in progress.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 *
		 * NOTE(review): the flag tested below is
		 * HCI_POWERING_DOWN although the comment above speaks
		 * of powering on - confirm which transition is meant.
		 */
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
			return;

		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the originator of the name change when broadcasting */
	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
10117 
10118 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
10119 {
10120 	int i;
10121 
10122 	for (i = 0; i < uuid_count; i++) {
10123 		if (!memcmp(uuid, uuids[i], 16))
10124 			return true;
10125 	}
10126 
10127 	return false;
10128 }
10129 
/* Walk EIR/advertising data and return true if any advertised UUID
 * (16-, 32- or 128-bit, complete or incomplete list) occurs in the
 * @uuids array of 128-bit values.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];	/* length byte excludes itself */
		u8 uuid[16];
		int i;

		/* A zero length field terminates the significant data */
		if (field_len == 0)
			break;

		/* Stop on a truncated field */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* Expand each 16-bit UUID into the Bluetooth
			 * base UUID before comparing.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* Same base-UUID expansion for 32-bit UUIDs */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* 128-bit UUIDs are compared as-is */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
10184 
/* Decide whether a discovery result passes the service discovery
 * filter (RSSI threshold and/or UUID list) configured in
 * hdev->discovery.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
10227 
10228 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
10229 				  bdaddr_t *bdaddr, u8 addr_type)
10230 {
10231 	struct mgmt_ev_adv_monitor_device_lost ev;
10232 
10233 	ev.monitor_handle = cpu_to_le16(handle);
10234 	bacpy(&ev.addr.bdaddr, bdaddr);
10235 	ev.addr.type = addr_type;
10236 
10237 	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
10238 		   NULL);
10239 }
10240 
/* Send an ADV_MONITOR_DEVICE_FOUND event that duplicates the given
 * DEVICE_FOUND skb with the matched monitor handle prepended. The
 * original @skb is left untouched (not consumed). Silently does
 * nothing if @skb is NULL or the copy cannot be allocated.
 */
static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	/* Size of the monitor event is the device found payload plus
	 * the extra monitor_handle field.
	 */
	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}
10270 
/* Route an advertisement report to the DEVICE_FOUND and/or
 * ADV_MONITOR_DEVICE_FOUND events, tracking per-device notification
 * state for the monitored device list. Consumes @skb on every path.
 */
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	/* Recomputed below while walking the monitored device list */
	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			/* First report for this monitored device */
			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	/* Either forward the original skb or release it; the monitor
	 * events above only copied its payload.
	 */
	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}
10334 
/* Forward an advertising report to the mesh interface as a Mesh Device
 * Found event.
 *
 * If a list of interesting AD types was configured (mesh_ad_types),
 * the advertising and scan response data are scanned first and the
 * report is dropped unless at least one requested AD type is present.
 */
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	/* An empty filter list accepts every report */
	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				/* Zero terminates the configured list */
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	/* No requested AD type found - drop the report */
	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}
10400 
/* Report a device discovered via inquiry or LE scanning to management
 * sockets as an MGMT_EV_DEVICE_FOUND event, after applying the filters
 * of the active discovery session (service/RSSI filtering, limited
 * discoverable mode). If the controller is in mesh mode, LE reports are
 * additionally forwarded as MGMT_EV_MESH_DEVICE_FOUND.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	/* Mesh mode gets its own event for every LE report, regardless of
	 * the discovery/filter checks below.
	 */
	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			/* BR/EDR: bit 5 of the minor class octet */
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			/* LE: Limited Discoverable flag in the AD Flags field */
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	/* Synthesize a Class of Device EIR field from dev_class when the
	 * EIR data did not already contain one (uses the 5 spare bytes
	 * reserved above).
	 */
	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	/* eir_len now includes any appended CoD field as well */
	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	/* Let the advertisement monitor code decide whether to deliver the
	 * event to discovery listeners, monitor listeners, or both.
	 */
	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}
10492 
10493 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10494 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
10495 {
10496 	struct sk_buff *skb;
10497 	struct mgmt_ev_device_found *ev;
10498 	u16 eir_len = 0;
10499 	u32 flags = 0;
10500 
10501 	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10502 			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
10503 
10504 	ev = skb_put(skb, sizeof(*ev));
10505 	bacpy(&ev->addr.bdaddr, bdaddr);
10506 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
10507 	ev->rssi = rssi;
10508 
10509 	if (name)
10510 		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
10511 	else
10512 		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
10513 
10514 	ev->eir_len = cpu_to_le16(eir_len);
10515 	ev->flags = cpu_to_le32(flags);
10516 
10517 	mgmt_event_skb(skb, NULL);
10518 }
10519 
10520 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
10521 {
10522 	struct mgmt_ev_discovering ev;
10523 
10524 	bt_dev_dbg(hdev, "discovering %u", discovering);
10525 
10526 	memset(&ev, 0, sizeof(ev));
10527 	ev.type = hdev->discovery.type;
10528 	ev.discovering = discovering;
10529 
10530 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
10531 }
10532 
10533 void mgmt_suspending(struct hci_dev *hdev, u8 state)
10534 {
10535 	struct mgmt_ev_controller_suspend ev;
10536 
10537 	ev.suspend_state = state;
10538 	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
10539 }
10540 
10541 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
10542 		   u8 addr_type)
10543 {
10544 	struct mgmt_ev_controller_resume ev;
10545 
10546 	ev.wake_reason = reason;
10547 	if (bdaddr) {
10548 		bacpy(&ev.addr.bdaddr, bdaddr);
10549 		ev.addr.type = addr_type;
10550 	} else {
10551 		memset(&ev.addr, 0, sizeof(ev.addr));
10552 	}
10553 
10554 	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
10555 }
10556 
/* Registration record for the HCI control channel: dispatch table of
 * management command handlers plus the per-hdev init hook.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
10563 
/* Register the management interface on the HCI control channel.
 * Returns 0 on success or a negative errno from the registration.
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
10568 
/* Unregister the management interface from the HCI control channel. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
10573 
10574 void mgmt_cleanup(struct sock *sk)
10575 {
10576 	struct mgmt_mesh_tx *mesh_tx;
10577 	struct hci_dev *hdev;
10578 
10579 	read_lock(&hci_dev_list_lock);
10580 
10581 	list_for_each_entry(hdev, &hci_dev_list, list) {
10582 		do {
10583 			mesh_tx = mgmt_mesh_next(hdev, sk);
10584 
10585 			if (mesh_tx)
10586 				mesh_send_complete(hdev, mesh_tx, true);
10587 		} while (mesh_tx);
10588 	}
10589 
10590 	read_unlock(&hci_dev_list_lock);
10591 }
10592