xref: /linux/net/bluetooth/mgmt.c (revision 4b42fbc6bd8f73d9ded535d8c61ccaa837ff3bd4)
/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <linux/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "mgmt_util.h"
#include "mgmt_config.h"
#include "msft.h"
#include "eir.h"
#include "aosp.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	23

static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
	MGMT_OP_HCI_CMD_SYNC,
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};

static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};

static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};

#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* HCI to MGMT error code conversion table */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

static u8 mgmt_errno_status(int err)
{
	switch (err) {
	case 0:
		return MGMT_STATUS_SUCCESS;
	case -EPERM:
		return MGMT_STATUS_REJECTED;
	case -EINVAL:
		return MGMT_STATUS_INVALID_PARAMS;
	case -EOPNOTSUPP:
		return MGMT_STATUS_NOT_SUPPORTED;
	case -EBUSY:
		return MGMT_STATUS_BUSY;
	case -ETIMEDOUT:
		return MGMT_STATUS_AUTH_FAILED;
	case -ENOMEM:
		return MGMT_STATUS_NO_RESOURCES;
	case -EISCONN:
		return MGMT_STATUS_ALREADY_CONNECTED;
	case -ENOTCONN:
		return MGMT_STATUS_DISCONNECTED;
	}

	return MGMT_STATUS_FAILED;
}

static u8 mgmt_status(int err)
{
	if (err < 0)
		return mgmt_errno_status(err);

	if (err < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[err];

	return MGMT_STATUS_FAILED;
}
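
/* Illustrative examples of the two conversion paths above (values taken
 * from mgmt_status_table[] and mgmt_errno_status(); not part of the
 * original source):
 *
 *	mgmt_status(0x04)	-> MGMT_STATUS_CONNECT_FAILED (Page Timeout)
 *	mgmt_status(-EBUSY)	-> MGMT_STATUS_BUSY (errno path)
 *	mgmt_status(0x100)	-> MGMT_STATUS_FAILED (beyond the table)
 */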

static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}

static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}

static u8 le_addr_type(u8 mgmt_addr_type)
{
	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
		return ADDR_LE_DEV_PUBLIC;
	else
		return ADDR_LE_DEV_RANDOM;
}

void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}

static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}

static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}
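
/* Reply layout sketch (illustrative only, not part of the original
 * source): rp->opcodes is a flexible array carrying num_commands
 * little-endian u16 opcodes followed by num_events little-endian u16
 * event codes, so the buffer built above for a trusted socket is
 * effectively:
 *
 *	struct mgmt_rp_read_commands hdr;	// num_commands, num_events
 *	__le16 commands[ARRAY_SIZE(mgmt_commands)];
 *	__le16 events[ARRAY_SIZE(mgmt_events)];
 */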

static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list)
		count++;

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			rp->entry[count].type = 0x01;
		else
			rp->entry[count].type = 0x00;

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
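
/* Sizing note (illustrative, not part of the original source):
 * struct_size(rp, entry, count) computes sizeof(*rp) plus
 * count * sizeof(rp->entry[0]) with overflow checking, which is why
 * both the allocation and the reply length passed to
 * mgmt_cmd_complete() above use it instead of open-coded arithmetic.
 */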

static bool is_configured(struct hci_dev *hdev)
{
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		return false;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}

static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}

static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}

static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}

static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}

static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}

static u32 get_configurable_phys(struct hci_dev *hdev)
{
	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
}
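
/* Example (illustrative, not part of the original source): a dual-mode
 * controller that supports 3- and 5-slot BR packets but neither EDR
 * 2M/3M nor the LE 2M/Coded PHYs has get_supported_phys() equal to
 * MGMT_PHY_BR_1M_1SLOT | MGMT_PHY_BR_1M_3SLOT | MGMT_PHY_BR_1M_5SLOT |
 * MGMT_PHY_LE_1M_TX | MGMT_PHY_LE_1M_RX. get_configurable_phys() masks
 * out the always-enabled BR 1M 1-slot and LE 1M bits, so only the 3-
 * and 5-slot BR PHYs remain configurable for such a controller.
 */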

static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev))
			settings |= MGMT_SETTING_SSP;

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}

static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then whether
	 * the address is actually in use decides if the flag is set.
	 *
	 * For single-mode LE-only controllers and for dual-mode
	 * controllers with BR/EDR disabled, the existence of the static
	 * address is what gets evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}
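
	/* Worked example (illustrative, not part of the original source):
	 * an LE-only controller without a public address (hdev->bdaddr ==
	 * BDADDR_ANY) reports MGMT_SETTING_STATIC_ADDRESS only once a
	 * static address has actually been programmed; before
	 * MGMT_OP_SET_STATIC_ADDRESS is used, hdev->static_addr is still
	 * BDADDR_ANY and the bit stays clear.
	 */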

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_capable(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_capable(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	return settings;
}
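
/* Example (illustrative, assuming the setting bit values defined in
 * mgmt.h; not part of the original source): a powered, connectable,
 * bondable dual-mode controller with SSP and LE enabled yields
 * MGMT_SETTING_POWERED | MGMT_SETTING_CONNECTABLE |
 * MGMT_SETTING_BONDABLE | MGMT_SETTING_SSP | MGMT_SETTING_BREDR |
 * MGMT_SETTING_LE, i.e. 0x000002d3.
 */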

static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}
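
/* Example (illustrative, not part of the original source): while a
 * MGMT_OP_SET_DISCOVERABLE command with val 0x02 is still pending, the
 * advertising data already carries LE_AD_LIMITED even though
 * HCI_LIMITED_DISCOVERABLE has not been set yet; once no command is
 * pending, the returned flags simply mirror the hdev flags.
 */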

bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}

static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}

static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}

static int rpa_expired_sync(struct hci_dev *hdev, void *data)
{
	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
	else
		return hci_enable_advertising_sync(hdev);
}

static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data);

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);

static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}

static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}

static int mesh_send_sync(struct hci_dev *hdev, void *data);
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}

static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}
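
/* Completion chain summary (not part of the original source): the
 * delayed work above queues mesh_send_done_sync(), which clears
 * HCI_MESH_SENDING, disables advertising and completes the finished
 * transmission; the mesh_next() callback then picks the next queued
 * mgmt_mesh_tx, queues mesh_send_sync() for it, and sets
 * HCI_MESH_SENDING again for as long as packets remain.
 */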

static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set implicitly
	 * so that pairing works for them. For mgmt, however, we
	 * require user-space to explicitly enable it.
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}

static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}

static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}

static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));

	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}

static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}

void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}

void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for the AUTO_OFF case, where the controller
		 * might not "really" have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}

static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
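
/* Event payload example (illustrative, assuming the setting bit values
 * from mgmt.h; not part of the original source): powering on a
 * connectable controller emits a MGMT_EV_NEW_SETTINGS event whose
 * 4-byte little-endian payload is MGMT_SETTING_POWERED |
 * MGMT_SETTING_CONNECTABLE = 0x00000003, delivered only to sockets with
 * HCI_MGMT_SETTING_EVENTS set and excluding the socket in @skip.
 */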

static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_settings() for power on, as power off is
		 * deferred to the hdev->power_off work, which does call
		 * hci_dev_do_close().
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}

static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}

static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!cp->val) {
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
					      MGMT_STATUS_BUSY);
			goto failed;
		}
	}

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}

struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};

static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}

static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}

static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	/* dequeue cmd_sync entries using cmd as data as that is about to be
	 * removed/freed.
	 */
	hci_cmd_sync_dequeue(match->hdev, NULL, cmd, NULL);

	if (cmd->cmd_complete) {
		cmd->cmd_complete(cmd, match->mgmt_status);
		mgmt_pending_remove(cmd);

		return;
	}

	cmd_status_rsp(cmd, data);
}

static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}

static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}

static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);

		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}

static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);
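
	/* Valid combinations after the check above (for reference, derived
	 * from the test; not part of the original source):
	 *
	 *	val 0x00 (off)		-> timeout must be 0
	 *	val 0x01 (general)	-> timeout 0 (no limit) or > 0
	 *	val 0x02 (limited)	-> timeout must be > 0
	 */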
1575 
1576 	hci_dev_lock(hdev);
1577 
1578 	if (!hdev_is_powered(hdev) && timeout > 0) {
1579 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1580 				      MGMT_STATUS_NOT_POWERED);
1581 		goto failed;
1582 	}
1583 
1584 	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1585 	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1586 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1587 				      MGMT_STATUS_BUSY);
1588 		goto failed;
1589 	}
1590 
1591 	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1592 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1593 				      MGMT_STATUS_REJECTED);
1594 		goto failed;
1595 	}
1596 
1597 	if (hdev->advertising_paused) {
1598 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1599 				      MGMT_STATUS_BUSY);
1600 		goto failed;
1601 	}
1602 
1603 	if (!hdev_is_powered(hdev)) {
1604 		bool changed = false;
1605 
1606 		/* Setting limited discoverable when powered off is
1607 		 * not a valid operation since it requires a timeout
1608 		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1609 		 */
1610 		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1611 			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1612 			changed = true;
1613 		}
1614 
1615 		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1616 		if (err < 0)
1617 			goto failed;
1618 
1619 		if (changed)
1620 			err = new_settings(hdev, sk);
1621 
1622 		goto failed;
1623 	}
1624 
1625 	/* If the current mode is the same, then just update the timeout
1626 	 * value with the new value. And if only the timeout gets updated,
1627 	 * then no need for any HCI transactions.
1628 	 */
1629 	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1630 	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
1631 						   HCI_LIMITED_DISCOVERABLE)) {
1632 		cancel_delayed_work(&hdev->discov_off);
1633 		hdev->discov_timeout = timeout;
1634 
1635 		if (cp->val && hdev->discov_timeout > 0) {
1636 			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1637 			queue_delayed_work(hdev->req_workqueue,
1638 					   &hdev->discov_off, to);
1639 		}
1640 
1641 		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1642 		goto failed;
1643 	}
1644 
1645 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1646 	if (!cmd) {
1647 		err = -ENOMEM;
1648 		goto failed;
1649 	}
1650 
1651 	/* Cancel any potential discoverable timeout that might be
1652 	 * still active and store new timeout value. The arming of
1653 	 * the timeout happens in the complete handler.
1654 	 */
1655 	cancel_delayed_work(&hdev->discov_off);
1656 	hdev->discov_timeout = timeout;
1657 
1658 	if (cp->val)
1659 		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1660 	else
1661 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1662 
1663 	/* Limited discoverable mode */
1664 	if (cp->val == 0x02)
1665 		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1666 	else
1667 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1668 
1669 	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1670 				 mgmt_set_discoverable_complete);
1671 
1672 	if (err < 0)
1673 		mgmt_pending_remove(cmd);
1674 
1675 failed:
1676 	hci_dev_unlock(hdev);
1677 	return err;
1678 }
1679 
1680 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1681 					  int err)
1682 {
1683 	struct mgmt_pending_cmd *cmd = data;
1684 
1685 	bt_dev_dbg(hdev, "err %d", err);
1686 
1687 	/* Make sure cmd still outstanding. */
1688 	if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1689 		return;
1690 
1691 	hci_dev_lock(hdev);
1692 
1693 	if (err) {
1694 		u8 mgmt_err = mgmt_status(err);
1695 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1696 		goto done;
1697 	}
1698 
1699 	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1700 	new_settings(hdev, cmd->sk);
1701 
1702 done:
1703 	mgmt_pending_remove(cmd);
1704 
1705 	hci_dev_unlock(hdev);
1706 }
1707 
1708 static int set_connectable_update_settings(struct hci_dev *hdev,
1709 					   struct sock *sk, u8 val)
1710 {
1711 	bool changed = false;
1712 	int err;
1713 
1714 	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1715 		changed = true;
1716 
1717 	if (val) {
1718 		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1719 	} else {
1720 		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1721 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1722 	}
1723 
1724 	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1725 	if (err < 0)
1726 		return err;
1727 
1728 	if (changed) {
1729 		hci_update_scan(hdev);
1730 		hci_update_passive_scan(hdev);
1731 		return new_settings(hdev, sk);
1732 	}
1733 
1734 	return 0;
1735 }
1736 
1737 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1738 {
1739 	BT_DBG("%s", hdev->name);
1740 
1741 	return hci_update_connectable_sync(hdev);
1742 }
1743 
1744 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1745 			   u16 len)
1746 {
1747 	struct mgmt_mode *cp = data;
1748 	struct mgmt_pending_cmd *cmd;
1749 	int err;
1750 
1751 	bt_dev_dbg(hdev, "sock %p", sk);
1752 
1753 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1754 	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1755 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1756 				       MGMT_STATUS_REJECTED);
1757 
1758 	if (cp->val != 0x00 && cp->val != 0x01)
1759 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1760 				       MGMT_STATUS_INVALID_PARAMS);
1761 
1762 	hci_dev_lock(hdev);
1763 
1764 	if (!hdev_is_powered(hdev)) {
1765 		err = set_connectable_update_settings(hdev, sk, cp->val);
1766 		goto failed;
1767 	}
1768 
1769 	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1770 	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1771 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1772 				      MGMT_STATUS_BUSY);
1773 		goto failed;
1774 	}
1775 
1776 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1777 	if (!cmd) {
1778 		err = -ENOMEM;
1779 		goto failed;
1780 	}
1781 
1782 	if (cp->val) {
1783 		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1784 	} else {
1785 		if (hdev->discov_timeout > 0)
1786 			cancel_delayed_work(&hdev->discov_off);
1787 
1788 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1789 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1790 		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1791 	}
1792 
1793 	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1794 				 mgmt_set_connectable_complete);
1795 
1796 	if (err < 0)
1797 		mgmt_pending_remove(cmd);
1798 
1799 failed:
1800 	hci_dev_unlock(hdev);
1801 	return err;
1802 }
1803 
1804 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1805 			u16 len)
1806 {
1807 	struct mgmt_mode *cp = data;
1808 	bool changed;
1809 	int err;
1810 
1811 	bt_dev_dbg(hdev, "sock %p", sk);
1812 
1813 	if (cp->val != 0x00 && cp->val != 0x01)
1814 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1815 				       MGMT_STATUS_INVALID_PARAMS);
1816 
1817 	hci_dev_lock(hdev);
1818 
1819 	if (cp->val)
1820 		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1821 	else
1822 		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1823 
1824 	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1825 	if (err < 0)
1826 		goto unlock;
1827 
1828 	if (changed) {
1829 		/* In limited privacy mode the change of bondable mode
1830 		 * may affect the local advertising address.
1831 		 */
1832 		hci_update_discoverable(hdev);
1833 
1834 		err = new_settings(hdev, sk);
1835 	}
1836 
1837 unlock:
1838 	hci_dev_unlock(hdev);
1839 	return err;
1840 }
1841 
1842 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1843 			     u16 len)
1844 {
1845 	struct mgmt_mode *cp = data;
1846 	struct mgmt_pending_cmd *cmd;
1847 	u8 val, status;
1848 	int err;
1849 
1850 	bt_dev_dbg(hdev, "sock %p", sk);
1851 
1852 	status = mgmt_bredr_support(hdev);
1853 	if (status)
1854 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1855 				       status);
1856 
1857 	if (cp->val != 0x00 && cp->val != 0x01)
1858 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1859 				       MGMT_STATUS_INVALID_PARAMS);
1860 
1861 	hci_dev_lock(hdev);
1862 
1863 	if (!hdev_is_powered(hdev)) {
1864 		bool changed = false;
1865 
1866 		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1867 			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1868 			changed = true;
1869 		}
1870 
1871 		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1872 		if (err < 0)
1873 			goto failed;
1874 
1875 		if (changed)
1876 			err = new_settings(hdev, sk);
1877 
1878 		goto failed;
1879 	}
1880 
1881 	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1882 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1883 				      MGMT_STATUS_BUSY);
1884 		goto failed;
1885 	}
1886 
1887 	val = !!cp->val;
1888 
1889 	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1890 		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1891 		goto failed;
1892 	}
1893 
1894 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1895 	if (!cmd) {
1896 		err = -ENOMEM;
1897 		goto failed;
1898 	}
1899 
1900 	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1901 	if (err < 0) {
1902 		mgmt_pending_remove(cmd);
1903 		goto failed;
1904 	}
1905 
1906 failed:
1907 	hci_dev_unlock(hdev);
1908 	return err;
1909 }
1910 
1911 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1912 {
1913 	struct cmd_lookup match = { NULL, hdev };
1914 	struct mgmt_pending_cmd *cmd = data;
1915 	struct mgmt_mode *cp = cmd->param;
1916 	u8 enable = cp->val;
1917 	bool changed;
1918 
1919 	/* Make sure the command is still outstanding. */
1920 	if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1921 		return;
1922 
1923 	if (err) {
1924 		u8 mgmt_err = mgmt_status(err);
1925 
1926 		if (enable && hci_dev_test_and_clear_flag(hdev,
1927 							  HCI_SSP_ENABLED)) {
1928 			new_settings(hdev, NULL);
1929 		}
1930 
1931 		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
1932 				     &mgmt_err);
1933 		return;
1934 	}
1935 
1936 	if (enable)
1937 		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1938 	else
1939 		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1941 
1942 	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
1943 
1944 	if (changed)
1945 		new_settings(hdev, match.sk);
1946 
1947 	if (match.sk)
1948 		sock_put(match.sk);
1949 
1950 	hci_update_eir_sync(hdev);
1951 }
1952 
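/* When enabling, HCI_SSP_ENABLED is set before the mode write and
 * cleared again on success, leaving the final flag transition (and the
 * new_settings signalling) to set_ssp_complete.
 */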
1953 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1954 {
1955 	struct mgmt_pending_cmd *cmd = data;
1956 	struct mgmt_mode *cp = cmd->param;
1957 	bool changed = false;
1958 	int err;
1959 
1960 	if (cp->val)
1961 		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1962 
1963 	err = hci_write_ssp_mode_sync(hdev, cp->val);
1964 
1965 	if (!err && changed)
1966 		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
1967 
1968 	return err;
1969 }
1970 
1971 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1972 {
1973 	struct mgmt_mode *cp = data;
1974 	struct mgmt_pending_cmd *cmd;
1975 	u8 status;
1976 	int err;
1977 
1978 	bt_dev_dbg(hdev, "sock %p", sk);
1979 
1980 	status = mgmt_bredr_support(hdev);
1981 	if (status)
1982 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1983 
1984 	if (!lmp_ssp_capable(hdev))
1985 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1986 				       MGMT_STATUS_NOT_SUPPORTED);
1987 
1988 	if (cp->val != 0x00 && cp->val != 0x01)
1989 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1990 				       MGMT_STATUS_INVALID_PARAMS);
1991 
1992 	hci_dev_lock(hdev);
1993 
1994 	if (!hdev_is_powered(hdev)) {
1995 		bool changed;
1996 
1997 		if (cp->val) {
1998 			changed = !hci_dev_test_and_set_flag(hdev,
1999 							     HCI_SSP_ENABLED);
2000 		} else {
2001 			changed = hci_dev_test_and_clear_flag(hdev,
2002 							      HCI_SSP_ENABLED);
2003 		}
2004 
2005 		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2006 		if (err < 0)
2007 			goto failed;
2008 
2009 		if (changed)
2010 			err = new_settings(hdev, sk);
2011 
2012 		goto failed;
2013 	}
2014 
2015 	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2016 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2017 				      MGMT_STATUS_BUSY);
2018 		goto failed;
2019 	}
2020 
2021 	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2022 		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2023 		goto failed;
2024 	}
2025 
2026 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2027 	if (!cmd)
2028 		err = -ENOMEM;
2029 	else
2030 		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2031 					 set_ssp_complete);
2032 
2033 	if (err < 0) {
2034 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2035 				      MGMT_STATUS_FAILED);
2036 
2037 		if (cmd)
2038 			mgmt_pending_remove(cmd);
2039 	}
2040 
2041 failed:
2042 	hci_dev_unlock(hdev);
2043 	return err;
2044 }
2045 
2046 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2047 {
2048 	bt_dev_dbg(hdev, "sock %p", sk);
2049 
2050 	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2051 			       MGMT_STATUS_NOT_SUPPORTED);
2052 }
2053 
2054 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2055 {
2056 	struct cmd_lookup match = { NULL, hdev };
2057 	u8 status = mgmt_status(err);
2058 
2059 	bt_dev_dbg(hdev, "err %d", err);
2060 
2061 	if (status) {
2062 		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2063 				     &status);
2064 		return;
2065 	}
2066 
2067 	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2068 
2069 	new_settings(hdev, match.sk);
2070 
2071 	if (match.sk)
2072 		sock_put(match.sk);
2073 }
2074 
2075 static int set_le_sync(struct hci_dev *hdev, void *data)
2076 {
2077 	struct mgmt_pending_cmd *cmd = data;
2078 	struct mgmt_mode *cp = cmd->param;
2079 	u8 val = !!cp->val;
2080 	int err;
2081 
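	/* When switching LE off, tear down advertising first: remove all
	 * advertising instances, stop any ongoing advertising and, on
	 * controllers with extended advertising, remove the default
	 * instance as well.
	 */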
2082 	if (!val) {
2083 		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2084 
2085 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2086 			hci_disable_advertising_sync(hdev);
2087 
2088 		if (ext_adv_capable(hdev))
2089 			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2090 	} else {
2091 		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2092 	}
2093 
2094 	err = hci_write_le_host_supported_sync(hdev, val, 0);
2095 
2096 	/* Make sure the controller has a good default for
2097 	 * advertising data. Restrict the update to when LE
2098 	 * has actually been enabled. During power on, the
2099 	 * update in powered_update_hci will take care of it.
2100 	 */
2101 	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2102 		if (ext_adv_capable(hdev)) {
2103 			int status;
2104 
2105 			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2106 			if (!status)
2107 				hci_update_scan_rsp_data_sync(hdev, 0x00);
2108 		} else {
2109 			hci_update_adv_data_sync(hdev, 0x00);
2110 			hci_update_scan_rsp_data_sync(hdev, 0x00);
2111 		}
2112 
2113 		hci_update_passive_scan(hdev);
2114 	}
2115 
2116 	return err;
2117 }
2118 
2119 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2120 {
2121 	struct mgmt_pending_cmd *cmd = data;
2122 	u8 status = mgmt_status(err);
2123 	struct sock *sk = cmd->sk;
2124 
2125 	if (status) {
2126 		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
2127 				     cmd_status_rsp, &status);
2128 		return;
2129 	}
2130 
2131 	mgmt_pending_remove(cmd);
2132 	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
2133 }
2134 
2135 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2136 {
2137 	struct mgmt_pending_cmd *cmd = data;
2138 	struct mgmt_cp_set_mesh *cp = cmd->param;
2139 	size_t len = cmd->param_len;
2140 
2141 	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2142 
2143 	if (cp->enable)
2144 		hci_dev_set_flag(hdev, HCI_MESH);
2145 	else
2146 		hci_dev_clear_flag(hdev, HCI_MESH);
2147 
2148 	len -= sizeof(*cp);
2149 
2150 	/* If the filters don't fit, forward all advertising packets */
2151 	if (len <= sizeof(hdev->mesh_ad_types))
2152 		memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2153 
2154 	hci_update_passive_scan_sync(hdev);
2155 	return 0;
2156 }
2157 
2158 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2159 {
2160 	struct mgmt_cp_set_mesh *cp = data;
2161 	struct mgmt_pending_cmd *cmd;
2162 	int err = 0;
2163 
2164 	bt_dev_dbg(hdev, "sock %p", sk);
2165 
2166 	if (!lmp_le_capable(hdev) ||
2167 	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2168 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2169 				       MGMT_STATUS_NOT_SUPPORTED);
2170 
2171 	if (cp->enable != 0x00 && cp->enable != 0x01)
2172 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2173 				       MGMT_STATUS_INVALID_PARAMS);
2174 
2175 	hci_dev_lock(hdev);
2176 
2177 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2178 	if (!cmd)
2179 		err = -ENOMEM;
2180 	else
2181 		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2182 					 set_mesh_complete);
2183 
2184 	if (err < 0) {
2185 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2186 				      MGMT_STATUS_FAILED);
2187 
2188 		if (cmd)
2189 			mgmt_pending_remove(cmd);
2190 	}
2191 
2192 	hci_dev_unlock(hdev);
2193 	return err;
2194 }
2195 
2196 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2197 {
2198 	struct mgmt_mesh_tx *mesh_tx = data;
2199 	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2200 	unsigned long mesh_send_interval;
2201 	u8 mgmt_err = mgmt_status(err);
2202 
2203 	/* Report any errors here, but don't report completion */
2204 
2205 	if (mgmt_err) {
2206 		hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2207 		/* Send Complete Error Code for handle */
2208 		mesh_send_complete(hdev, mesh_tx, false);
2209 		return;
2210 	}
2211 
2212 	mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2213 	queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2214 			   mesh_send_interval);
2215 }
2216 
2217 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2218 {
2219 	struct mgmt_mesh_tx *mesh_tx = data;
2220 	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2221 	struct adv_info *adv, *next_instance;
2222 	u8 instance = hdev->le_num_of_adv_sets + 1;
2223 	u16 timeout, duration;
2224 	int err = 0;
2225 
2226 	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2227 		return MGMT_STATUS_BUSY;
2228 
2229 	timeout = 1000;
2230 	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2231 	adv = hci_add_adv_instance(hdev, instance, 0,
2232 				   send->adv_data_len, send->adv_data,
2233 				   0, NULL,
2234 				   timeout, duration,
2235 				   HCI_ADV_TX_POWER_NO_PREFERENCE,
2236 				   hdev->le_adv_min_interval,
2237 				   hdev->le_adv_max_interval,
2238 				   mesh_tx->handle);
2239 
2240 	if (!IS_ERR(adv))
2241 		mesh_tx->instance = instance;
2242 	else
2243 		err = PTR_ERR(adv);
2244 
2245 	if (hdev->cur_adv_instance == instance) {
2246 		/* If the currently advertised instance is being changed then
2247 		 * cancel the current advertising and schedule the next
2248 		 * instance. If there is only one instance then the overridden
2249 		 * advertising data will be visible right away.
2250 		 */
2251 		cancel_adv_timeout(hdev);
2252 
2253 		next_instance = hci_get_next_instance(hdev, instance);
2254 		if (next_instance)
2255 			instance = next_instance->instance;
2256 		else
2257 			instance = 0;
2258 	} else if (hdev->adv_instance_timeout) {
2259 		/* Immediately advertise the new instance if no other is active,
2260 		 * or let it go out naturally if advertising is already happening.
2261 		 */
2262 		instance = 0;
2263 	}
2264 
2265 	if (instance)
2266 		return hci_schedule_adv_instance_sync(hdev, instance, true);
2267 
2268 	return err;
2269 }
2270 
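/* Iterator callback: record one outstanding mesh TX handle in the
 * Read Mesh Features reply, up to rp->max_handles.
 */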
2271 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2272 {
2273 	struct mgmt_rp_mesh_read_features *rp = data;
2274 
2275 	if (rp->used_handles >= rp->max_handles)
2276 		return;
2277 
2278 	rp->handles[rp->used_handles++] = mesh_tx->handle;
2279 }
2280 
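/* Report the mesh feature limits along with the caller's outstanding
 * mesh TX handles; max_handles stays zero unless LE is enabled.
 */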
2281 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2282 			 void *data, u16 len)
2283 {
2284 	struct mgmt_rp_mesh_read_features rp;
2285 
2286 	if (!lmp_le_capable(hdev) ||
2287 	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2288 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2289 				       MGMT_STATUS_NOT_SUPPORTED);
2290 
2291 	memset(&rp, 0, sizeof(rp));
2292 	rp.index = cpu_to_le16(hdev->id);
2293 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2294 		rp.max_handles = MESH_HANDLES_MAX;
2295 
2296 	hci_dev_lock(hdev);
2297 
2298 	if (rp.max_handles)
2299 		mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2300 
2301 	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2302 			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2303 
2304 	hci_dev_unlock(hdev);
2305 	return 0;
2306 }
2307 
2308 static int send_cancel(struct hci_dev *hdev, void *data)
2309 {
2310 	struct mgmt_pending_cmd *cmd = data;
2311 	struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2312 	struct mgmt_mesh_tx *mesh_tx;
2313 
2314 	if (!cancel->handle) {
2315 		do {
2316 			mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2317 
2318 			if (mesh_tx)
2319 				mesh_send_complete(hdev, mesh_tx, false);
2320 		} while (mesh_tx);
2321 	} else {
2322 		mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2323 
2324 		if (mesh_tx && mesh_tx->sk == cmd->sk)
2325 			mesh_send_complete(hdev, mesh_tx, false);
2326 	}
2327 
2328 	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2329 			  0, NULL, 0);
2330 	mgmt_pending_free(cmd);
2331 
2332 	return 0;
2333 }
2334 
2335 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2336 			    void *data, u16 len)
2337 {
2338 	struct mgmt_pending_cmd *cmd;
2339 	int err;
2340 
2341 	if (!lmp_le_capable(hdev) ||
2342 	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2343 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2344 				       MGMT_STATUS_NOT_SUPPORTED);
2345 
2346 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2347 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2348 				       MGMT_STATUS_REJECTED);
2349 
2350 	hci_dev_lock(hdev);
2351 	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2352 	if (!cmd)
2353 		err = -ENOMEM;
2354 	else
2355 		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2356 
2357 	if (err < 0) {
2358 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2359 				      MGMT_STATUS_FAILED);
2360 
2361 		if (cmd)
2362 			mgmt_pending_free(cmd);
2363 	}
2364 
2365 	hci_dev_unlock(hdev);
2366 	return err;
2367 }
2368 
2369 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2370 {
2371 	struct mgmt_mesh_tx *mesh_tx;
2372 	struct mgmt_cp_mesh_send *send = data;
2373 	struct mgmt_rp_mesh_read_features rp;
2374 	bool sending;
2375 	int err = 0;
2376 
2377 	if (!lmp_le_capable(hdev) ||
2378 	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2379 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2380 				       MGMT_STATUS_NOT_SUPPORTED);
2381 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2382 	    len <= MGMT_MESH_SEND_SIZE ||
2383 	    len > (MGMT_MESH_SEND_SIZE + 31))
2384 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2385 				       MGMT_STATUS_REJECTED);
2386 
2387 	hci_dev_lock(hdev);
2388 
2389 	memset(&rp, 0, sizeof(rp));
2390 	rp.max_handles = MESH_HANDLES_MAX;
2391 
2392 	mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2393 
2394 	if (rp.max_handles <= rp.used_handles) {
2395 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2396 				      MGMT_STATUS_BUSY);
2397 		goto done;
2398 	}
2399 
2400 	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2401 	mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2402 
2403 	if (!mesh_tx)
2404 		err = -ENOMEM;
2405 	else if (!sending)
2406 		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2407 					 mesh_send_start_complete);
2408 
2409 	if (err < 0) {
2410 		bt_dev_err(hdev, "Send Mesh Failed %d", err);
2411 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2412 				      MGMT_STATUS_FAILED);
2413 
2414 		if (mesh_tx && sending)
2415 			mgmt_mesh_remove(mesh_tx);
2418 	} else {
2419 		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2420 
2421 		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2422 				  &mesh_tx->handle, 1);
2423 	}
2424 
2425 done:
2426 	hci_dev_unlock(hdev);
2427 	return err;
2428 }
2429 
2430 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2431 {
2432 	struct mgmt_mode *cp = data;
2433 	struct mgmt_pending_cmd *cmd;
2434 	int err;
2435 	u8 val, enabled;
2436 
2437 	bt_dev_dbg(hdev, "sock %p", sk);
2438 
2439 	if (!lmp_le_capable(hdev))
2440 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2441 				       MGMT_STATUS_NOT_SUPPORTED);
2442 
2443 	if (cp->val != 0x00 && cp->val != 0x01)
2444 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2445 				       MGMT_STATUS_INVALID_PARAMS);
2446 
2447 	/* Bluetooth single-mode LE-only controllers, or dual-mode
2448 	 * controllers configured as LE-only devices, do not allow
2449 	 * switching LE off. These either have LE enabled explicitly
2450 	 * or have had BR/EDR switched off previously.
2451 	 *
2452 	 * When trying to enable LE that is already enabled, gracefully
2453 	 * send a positive response. Trying to disable it, however,
2454 	 * results in rejection.
2455 	 */
2456 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2457 		if (cp->val == 0x01)
2458 			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2459 
2460 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2461 				       MGMT_STATUS_REJECTED);
2462 	}
2463 
2464 	hci_dev_lock(hdev);
2465 
2466 	val = !!cp->val;
2467 	enabled = lmp_host_le_capable(hdev);
2468 
2469 	if (!hdev_is_powered(hdev) || val == enabled) {
2470 		bool changed = false;
2471 
2472 		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2473 			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2474 			changed = true;
2475 		}
2476 
2477 		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2478 			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2479 			changed = true;
2480 		}
2481 
2482 		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2483 		if (err < 0)
2484 			goto unlock;
2485 
2486 		if (changed)
2487 			err = new_settings(hdev, sk);
2488 
2489 		goto unlock;
2490 	}
2491 
2492 	if (pending_find(MGMT_OP_SET_LE, hdev) ||
2493 	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2494 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2495 				      MGMT_STATUS_BUSY);
2496 		goto unlock;
2497 	}
2498 
2499 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2500 	if (!cmd)
2501 		err = -ENOMEM;
2502 	else
2503 		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2504 					 set_le_complete);
2505 
2506 	if (err < 0) {
2507 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2508 				      MGMT_STATUS_FAILED);
2509 
2510 		if (cmd)
2511 			mgmt_pending_remove(cmd);
2512 	}
2513 
2514 unlock:
2515 	hci_dev_unlock(hdev);
2516 	return err;
2517 }
2518 
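/* Execute the user-supplied HCI command synchronously and return either
 * the resulting event payload or the translated error status to the
 * originating socket.
 */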
2519 static int send_hci_cmd_sync(struct hci_dev *hdev, void *data)
2520 {
2521 	struct mgmt_pending_cmd *cmd = data;
2522 	struct mgmt_cp_hci_cmd_sync *cp = cmd->param;
2523 	struct sk_buff *skb;
2524 
2525 	skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cp->opcode),
2526 				le16_to_cpu(cp->params_len), cp->params,
2527 				cp->event, cp->timeout ?
2528 				msecs_to_jiffies(cp->timeout * 1000) :
2529 				HCI_CMD_TIMEOUT);
2530 	if (IS_ERR(skb)) {
2531 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2532 				mgmt_status(PTR_ERR(skb)));
2533 		goto done;
2534 	}
2535 
2536 	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC, 0,
2537 			  skb->data, skb->len);
2538 
2539 	kfree_skb(skb);
2540 
2541 done:
2542 	mgmt_pending_free(cmd);
2543 
2544 	return 0;
2545 }
2546 
2547 static int mgmt_hci_cmd_sync(struct sock *sk, struct hci_dev *hdev,
2548 			     void *data, u16 len)
2549 {
2550 	struct mgmt_cp_hci_cmd_sync *cp = data;
2551 	struct mgmt_pending_cmd *cmd;
2552 	int err;
2553 
2554 	if (len < sizeof(*cp))
2555 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2556 				       MGMT_STATUS_INVALID_PARAMS);
2557 
2558 	hci_dev_lock(hdev);
2559 	cmd = mgmt_pending_new(sk, MGMT_OP_HCI_CMD_SYNC, hdev, data, len);
2560 	if (!cmd)
2561 		err = -ENOMEM;
2562 	else
2563 		err = hci_cmd_sync_queue(hdev, send_hci_cmd_sync, cmd, NULL);
2564 
2565 	if (err < 0) {
2566 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2567 				      MGMT_STATUS_FAILED);
2568 
2569 		if (cmd)
2570 			mgmt_pending_free(cmd);
2571 	}
2572 
2573 	hci_dev_unlock(hdev);
2574 	return err;
2575 }
2576 
2577 /* This is a helper function to test for pending mgmt commands that can
2578  * cause CoD or EIR HCI commands. We can only allow one such pending
2579  * mgmt command at a time since otherwise we cannot easily track what
2580  * the current values are and will be, and based on that calculate
2581  * whether a new HCI command needs to be sent and, if so, with what value.
2582  */
2583 static bool pending_eir_or_class(struct hci_dev *hdev)
2584 {
2585 	struct mgmt_pending_cmd *cmd;
2586 
2587 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2588 		switch (cmd->opcode) {
2589 		case MGMT_OP_ADD_UUID:
2590 		case MGMT_OP_REMOVE_UUID:
2591 		case MGMT_OP_SET_DEV_CLASS:
2592 		case MGMT_OP_SET_POWERED:
2593 			return true;
2594 		}
2595 	}
2596 
2597 	return false;
2598 }
2599 
2600 static const u8 bluetooth_base_uuid[] = {
2601 			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2602 			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2603 };
2604 
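/* Determine the shortest SIG representation of a UUID: anything that
 * does not share its first 12 bytes (stored little-endian) with the
 * Bluetooth Base UUID is a full 128-bit UUID; otherwise the remaining
 * 32-bit value decides between a 32-bit and a 16-bit UUID. For example,
 * 0000110b-0000-1000-8000-00805f9b34fb is reported as 16 bits.
 */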
2605 static u8 get_uuid_size(const u8 *uuid)
2606 {
2607 	u32 val;
2608 
2609 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2610 		return 128;
2611 
2612 	val = get_unaligned_le32(&uuid[12]);
2613 	if (val > 0xffff)
2614 		return 32;
2615 
2616 	return 16;
2617 }
2618 
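/* Common completion for the UUID and device class commands: reply to
 * the originating socket with the current Class of Device.
 */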
2619 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2620 {
2621 	struct mgmt_pending_cmd *cmd = data;
2622 
2623 	bt_dev_dbg(hdev, "err %d", err);
2624 
2625 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2626 			  mgmt_status(err), hdev->dev_class, 3);
2627 
2628 	mgmt_pending_free(cmd);
2629 }
2630 
2631 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2632 {
2633 	int err;
2634 
2635 	err = hci_update_class_sync(hdev);
2636 	if (err)
2637 		return err;
2638 
2639 	return hci_update_eir_sync(hdev);
2640 }
2641 
2642 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2643 {
2644 	struct mgmt_cp_add_uuid *cp = data;
2645 	struct mgmt_pending_cmd *cmd;
2646 	struct bt_uuid *uuid;
2647 	int err;
2648 
2649 	bt_dev_dbg(hdev, "sock %p", sk);
2650 
2651 	hci_dev_lock(hdev);
2652 
2653 	if (pending_eir_or_class(hdev)) {
2654 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2655 				      MGMT_STATUS_BUSY);
2656 		goto failed;
2657 	}
2658 
2659 	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2660 	if (!uuid) {
2661 		err = -ENOMEM;
2662 		goto failed;
2663 	}
2664 
2665 	memcpy(uuid->uuid, cp->uuid, 16);
2666 	uuid->svc_hint = cp->svc_hint;
2667 	uuid->size = get_uuid_size(cp->uuid);
2668 
2669 	list_add_tail(&uuid->list, &hdev->uuids);
2670 
2671 	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2672 	if (!cmd) {
2673 		err = -ENOMEM;
2674 		goto failed;
2675 	}
2676 
2677 	/* MGMT_OP_ADD_UUID doesn't require the adapter to be up/running,
2678 	 * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2679 	 */
2680 	err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
2681 				  mgmt_class_complete);
2682 	if (err < 0)
2683 		mgmt_pending_free(cmd);
2686 
2687 failed:
2688 	hci_dev_unlock(hdev);
2689 	return err;
2690 }
2691 
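/* Arm the service cache timer on a powered adapter; returns true only
 * if the cache was not already active and the delayed work has been
 * queued.
 */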
2692 static bool enable_service_cache(struct hci_dev *hdev)
2693 {
2694 	if (!hdev_is_powered(hdev))
2695 		return false;
2696 
2697 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2698 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2699 				   CACHE_TIMEOUT);
2700 		return true;
2701 	}
2702 
2703 	return false;
2704 }
2705 
2706 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2707 {
2708 	int err;
2709 
2710 	err = hci_update_class_sync(hdev);
2711 	if (err)
2712 		return err;
2713 
2714 	return hci_update_eir_sync(hdev);
2715 }
2716 
2717 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2718 		       u16 len)
2719 {
2720 	struct mgmt_cp_remove_uuid *cp = data;
2721 	struct mgmt_pending_cmd *cmd;
2722 	struct bt_uuid *match, *tmp;
2723 	static const u8 bt_uuid_any[] = {
2724 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2725 	};
2726 	int err, found;
2727 
2728 	bt_dev_dbg(hdev, "sock %p", sk);
2729 
2730 	hci_dev_lock(hdev);
2731 
2732 	if (pending_eir_or_class(hdev)) {
2733 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2734 				      MGMT_STATUS_BUSY);
2735 		goto unlock;
2736 	}
2737 
2738 	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2739 		hci_uuids_clear(hdev);
2740 
2741 		if (enable_service_cache(hdev)) {
2742 			err = mgmt_cmd_complete(sk, hdev->id,
2743 						MGMT_OP_REMOVE_UUID,
2744 						0, hdev->dev_class, 3);
2745 			goto unlock;
2746 		}
2747 
2748 		goto update_class;
2749 	}
2750 
2751 	found = 0;
2752 
2753 	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2754 		if (memcmp(match->uuid, cp->uuid, 16) != 0)
2755 			continue;
2756 
2757 		list_del(&match->list);
2758 		kfree(match);
2759 		found++;
2760 	}
2761 
2762 	if (found == 0) {
2763 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2764 				      MGMT_STATUS_INVALID_PARAMS);
2765 		goto unlock;
2766 	}
2767 
2768 update_class:
2769 	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2770 	if (!cmd) {
2771 		err = -ENOMEM;
2772 		goto unlock;
2773 	}
2774 
2775 	/* MGMT_OP_REMOVE_UUID doesn't require the adapter to be up/running,
2776 	 * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2777 	 */
2778 	err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
2779 				  mgmt_class_complete);
2780 	if (err < 0)
2781 		mgmt_pending_free(cmd);
2782 
2783 unlock:
2784 	hci_dev_unlock(hdev);
2785 	return err;
2786 }
2787 
2788 static int set_class_sync(struct hci_dev *hdev, void *data)
2789 {
2790 	int err = 0;
2791 
2792 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2793 		cancel_delayed_work_sync(&hdev->service_cache);
2794 		err = hci_update_eir_sync(hdev);
2795 	}
2796 
2797 	if (err)
2798 		return err;
2799 
2800 	return hci_update_class_sync(hdev);
2801 }
2802 
2803 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2804 			 u16 len)
2805 {
2806 	struct mgmt_cp_set_dev_class *cp = data;
2807 	struct mgmt_pending_cmd *cmd;
2808 	int err;
2809 
2810 	bt_dev_dbg(hdev, "sock %p", sk);
2811 
2812 	if (!lmp_bredr_capable(hdev))
2813 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2814 				       MGMT_STATUS_NOT_SUPPORTED);
2815 
2816 	hci_dev_lock(hdev);
2817 
2818 	if (pending_eir_or_class(hdev)) {
2819 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2820 				      MGMT_STATUS_BUSY);
2821 		goto unlock;
2822 	}
2823 
2824 	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2825 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2826 				      MGMT_STATUS_INVALID_PARAMS);
2827 		goto unlock;
2828 	}
2829 
2830 	hdev->major_class = cp->major;
2831 	hdev->minor_class = cp->minor;
2832 
2833 	if (!hdev_is_powered(hdev)) {
2834 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2835 					hdev->dev_class, 3);
2836 		goto unlock;
2837 	}
2838 
2839 	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2840 	if (!cmd) {
2841 		err = -ENOMEM;
2842 		goto unlock;
2843 	}
2844 
2845 	/* MGMT_OP_SET_DEV_CLASS doesn't require the adapter to be up/running,
2846 	 * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2847 	 */
2848 	err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
2849 				  mgmt_class_complete);
2850 	if (err < 0)
2851 		mgmt_pending_free(cmd);
2852 
2853 unlock:
2854 	hci_dev_unlock(hdev);
2855 	return err;
2856 }
2857 
2858 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2859 			  u16 len)
2860 {
2861 	struct mgmt_cp_load_link_keys *cp = data;
2862 	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2863 				   sizeof(struct mgmt_link_key_info));
2864 	u16 key_count, expected_len;
2865 	bool changed;
2866 	int i;
2867 
2868 	bt_dev_dbg(hdev, "sock %p", sk);
2869 
2870 	if (!lmp_bredr_capable(hdev))
2871 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2872 				       MGMT_STATUS_NOT_SUPPORTED);
2873 
2874 	key_count = __le16_to_cpu(cp->key_count);
2875 	if (key_count > max_key_count) {
2876 		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2877 			   key_count);
2878 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2879 				       MGMT_STATUS_INVALID_PARAMS);
2880 	}
2881 
2882 	expected_len = struct_size(cp, keys, key_count);
2883 	if (expected_len != len) {
2884 		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2885 			   expected_len, len);
2886 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2887 				       MGMT_STATUS_INVALID_PARAMS);
2888 	}
2889 
2890 	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2891 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2892 				       MGMT_STATUS_INVALID_PARAMS);
2893 
2894 	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2895 		   key_count);
2896 
2897 	hci_dev_lock(hdev);
2898 
2899 	hci_link_keys_clear(hdev);
2900 
2901 	if (cp->debug_keys)
2902 		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2903 	else
2904 		changed = hci_dev_test_and_clear_flag(hdev,
2905 						      HCI_KEEP_DEBUG_KEYS);
2906 
2907 	if (changed)
2908 		new_settings(hdev, NULL);
2909 
2910 	for (i = 0; i < key_count; i++) {
2911 		struct mgmt_link_key_info *key = &cp->keys[i];
2912 
2913 		if (hci_is_blocked_key(hdev,
2914 				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
2915 				       key->val)) {
2916 			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2917 				    &key->addr.bdaddr);
2918 			continue;
2919 		}
2920 
2921 		if (key->addr.type != BDADDR_BREDR) {
2922 			bt_dev_warn(hdev,
2923 				    "Invalid link address type %u for %pMR",
2924 				    key->addr.type, &key->addr.bdaddr);
2925 			continue;
2926 		}
2927 
2928 		if (key->type > 0x08) {
2929 			bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
2930 				    key->type, &key->addr.bdaddr);
2931 			continue;
2932 		}
2933 
2934 		/* Always ignore debug keys and require a new pairing if
2935 		 * the user wants to use them.
2936 		 */
2937 		if (key->type == HCI_LK_DEBUG_COMBINATION)
2938 			continue;
2939 
2940 		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2941 				 key->type, key->pin_len, NULL);
2942 	}
2943 
2944 	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2945 
2946 	hci_dev_unlock(hdev);
2947 
2948 	return 0;
2949 }
2950 
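/* Notify all mgmt sockets except skip_sk that the given device has been
 * unpaired.
 */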
2951 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2952 			   u8 addr_type, struct sock *skip_sk)
2953 {
2954 	struct mgmt_ev_device_unpaired ev;
2955 
2956 	bacpy(&ev.addr.bdaddr, bdaddr);
2957 	ev.addr.type = addr_type;
2958 
2959 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2960 			  skip_sk);
2961 }
2962 
2963 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2964 {
2965 	struct mgmt_pending_cmd *cmd = data;
2966 	struct mgmt_cp_unpair_device *cp = cmd->param;
2967 
2968 	if (!err)
2969 		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2970 
2971 	cmd->cmd_complete(cmd, err);
2972 	mgmt_pending_free(cmd);
2973 }
2974 
2975 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2976 {
2977 	struct mgmt_pending_cmd *cmd = data;
2978 	struct mgmt_cp_unpair_device *cp = cmd->param;
2979 	struct hci_conn *conn;
2980 
2981 	if (cp->addr.type == BDADDR_BREDR)
2982 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2983 					       &cp->addr.bdaddr);
2984 	else
2985 		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2986 					       le_addr_type(cp->addr.type));
2987 
2988 	if (!conn)
2989 		return 0;
2990 
2991 	/* Disregard any possible error since the likes of hci_abort_conn_sync
2992 	 * will clean up the connection regardless of the error.
2993 	 */
2994 	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2995 
2996 	return 0;
2997 }
2998 
2999 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3000 			 u16 len)
3001 {
3002 	struct mgmt_cp_unpair_device *cp = data;
3003 	struct mgmt_rp_unpair_device rp;
3004 	struct hci_conn_params *params;
3005 	struct mgmt_pending_cmd *cmd;
3006 	struct hci_conn *conn;
3007 	u8 addr_type;
3008 	int err;
3009 
3010 	memset(&rp, 0, sizeof(rp));
3011 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3012 	rp.addr.type = cp->addr.type;
3013 
3014 	if (!bdaddr_type_is_valid(cp->addr.type))
3015 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3016 					 MGMT_STATUS_INVALID_PARAMS,
3017 					 &rp, sizeof(rp));
3018 
3019 	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
3020 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3021 					 MGMT_STATUS_INVALID_PARAMS,
3022 					 &rp, sizeof(rp));
3023 
3024 	hci_dev_lock(hdev);
3025 
3026 	if (!hdev_is_powered(hdev)) {
3027 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3028 					MGMT_STATUS_NOT_POWERED, &rp,
3029 					sizeof(rp));
3030 		goto unlock;
3031 	}
3032 
3033 	if (cp->addr.type == BDADDR_BREDR) {
3034 		/* If disconnection is requested, then look up the
3035 		 * connection. If the remote device is connected, the
3036 		 * connection will later be used to terminate the link.
3037 		 *
3038 		 * Explicitly setting it to NULL means the link will not
3039 		 * be terminated.
3040 		 */
3041 		if (cp->disconnect)
3042 			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3043 						       &cp->addr.bdaddr);
3044 		else
3045 			conn = NULL;
3046 
3047 		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
3048 		if (err < 0) {
3049 			err = mgmt_cmd_complete(sk, hdev->id,
3050 						MGMT_OP_UNPAIR_DEVICE,
3051 						MGMT_STATUS_NOT_PAIRED, &rp,
3052 						sizeof(rp));
3053 			goto unlock;
3054 		}
3055 
3056 		goto done;
3057 	}
3058 
3059 	/* LE address type */
3060 	addr_type = le_addr_type(cp->addr.type);
3061 
3062 	/* Abort any ongoing SMP pairing. Removes the LTK and IRK if they exist. */
3063 	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3064 	if (err < 0) {
3065 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3066 					MGMT_STATUS_NOT_PAIRED, &rp,
3067 					sizeof(rp));
3068 		goto unlock;
3069 	}
3070 
3071 	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3072 	if (!conn) {
3073 		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3074 		goto done;
3075 	}
3076 
3078 	/* Defer cleaning up the connection parameters until the connection
3079 	 * closes, to give a chance of keeping them if a re-pairing happens.
3080 	 */
3081 	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3082 
3083 	/* Disable auto-connection parameters if present */
3084 	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3085 	if (params) {
3086 		if (params->explicit_connect)
3087 			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3088 		else
3089 			params->auto_connect = HCI_AUTO_CONN_DISABLED;
3090 	}
3091 
3092 	/* If disconnection is not requested, then clear the connection
3093 	 * variable so that the link is not terminated.
3094 	 */
3095 	if (!cp->disconnect)
3096 		conn = NULL;
3097 
3098 done:
3099 	/* If the connection variable is set, then termination of the
3100 	 * link is requested.
3101 	 */
3102 	if (!conn) {
3103 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3104 					&rp, sizeof(rp));
3105 		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3106 		goto unlock;
3107 	}
3108 
3109 	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3110 			       sizeof(*cp));
3111 	if (!cmd) {
3112 		err = -ENOMEM;
3113 		goto unlock;
3114 	}
3115 
3116 	cmd->cmd_complete = addr_cmd_complete;
3117 
3118 	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3119 				 unpair_device_complete);
3120 	if (err < 0)
3121 		mgmt_pending_free(cmd);
3122 
3123 unlock:
3124 	hci_dev_unlock(hdev);
3125 	return err;
3126 }
3127 
3128 static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
3129 {
3130 	struct mgmt_pending_cmd *cmd = data;
3131 
3132 	cmd->cmd_complete(cmd, mgmt_status(err));
3133 	mgmt_pending_free(cmd);
3134 }
3135 
3136 static int disconnect_sync(struct hci_dev *hdev, void *data)
3137 {
3138 	struct mgmt_pending_cmd *cmd = data;
3139 	struct mgmt_cp_disconnect *cp = cmd->param;
3140 	struct hci_conn *conn;
3141 
3142 	if (cp->addr.type == BDADDR_BREDR)
3143 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3144 					       &cp->addr.bdaddr);
3145 	else
3146 		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3147 					       le_addr_type(cp->addr.type));
3148 
3149 	if (!conn)
3150 		return -ENOTCONN;
3151 
3152 	/* Disregard any possible error since the likes of hci_abort_conn_sync
3153 	 * will clean up the connection regardless of the error.
3154 	 */
3155 	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3156 
3157 	return 0;
3158 }
3159 
3160 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3161 		      u16 len)
3162 {
3163 	struct mgmt_cp_disconnect *cp = data;
3164 	struct mgmt_rp_disconnect rp;
3165 	struct mgmt_pending_cmd *cmd;
3166 	int err;
3167 
3168 	bt_dev_dbg(hdev, "sock %p", sk);
3169 
3170 	memset(&rp, 0, sizeof(rp));
3171 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3172 	rp.addr.type = cp->addr.type;
3173 
3174 	if (!bdaddr_type_is_valid(cp->addr.type))
3175 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3176 					 MGMT_STATUS_INVALID_PARAMS,
3177 					 &rp, sizeof(rp));
3178 
3179 	hci_dev_lock(hdev);
3180 
3181 	if (!test_bit(HCI_UP, &hdev->flags)) {
3182 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3183 					MGMT_STATUS_NOT_POWERED, &rp,
3184 					sizeof(rp));
3185 		goto failed;
3186 	}
3187 
3188 	cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3189 	if (!cmd) {
3190 		err = -ENOMEM;
3191 		goto failed;
3192 	}
3193 
3194 	cmd->cmd_complete = generic_cmd_complete;
3195 
3196 	err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
3197 				 disconnect_complete);
3198 	if (err < 0)
3199 		mgmt_pending_free(cmd);
3200 
3201 failed:
3202 	hci_dev_unlock(hdev);
3203 	return err;
3204 }
3205 
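/* Translate an HCI link type plus address type into the corresponding
 * mgmt BDADDR_* address type.
 */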
3206 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3207 {
3208 	switch (link_type) {
3209 	case ISO_LINK:
3210 	case LE_LINK:
3211 		switch (addr_type) {
3212 		case ADDR_LE_DEV_PUBLIC:
3213 			return BDADDR_LE_PUBLIC;
3214 
3215 		default:
3216 			/* Fallback to LE Random address type */
3217 			return BDADDR_LE_RANDOM;
3218 		}
3219 
3220 	default:
3221 		/* Fallback to BR/EDR type */
3222 		return BDADDR_BREDR;
3223 	}
3224 }
3225 
3226 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3227 			   u16 data_len)
3228 {
3229 	struct mgmt_rp_get_connections *rp;
3230 	struct hci_conn *c;
3231 	int err;
3232 	u16 i;
3233 
3234 	bt_dev_dbg(hdev, "sock %p", sk);
3235 
3236 	hci_dev_lock(hdev);
3237 
3238 	if (!hdev_is_powered(hdev)) {
3239 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3240 				      MGMT_STATUS_NOT_POWERED);
3241 		goto unlock;
3242 	}
3243 
3244 	i = 0;
3245 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
3246 		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3247 			i++;
3248 	}
3249 
3250 	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3251 	if (!rp) {
3252 		err = -ENOMEM;
3253 		goto unlock;
3254 	}
3255 
3256 	i = 0;
3257 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
3258 		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3259 			continue;
3260 		bacpy(&rp->addr[i].bdaddr, &c->dst);
3261 		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3262 		if (c->type == SCO_LINK || c->type == ESCO_LINK)
3263 			continue;
3264 		i++;
3265 	}
3266 
3267 	rp->conn_count = cpu_to_le16(i);
3268 
3269 	/* Recalculate length in case of filtered SCO connections, etc */
3270 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3271 				struct_size(rp, addr, i));
3272 
3273 	kfree(rp);
3274 
3275 unlock:
3276 	hci_dev_unlock(hdev);
3277 	return err;
3278 }
3279 
3280 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3281 				   struct mgmt_cp_pin_code_neg_reply *cp)
3282 {
3283 	struct mgmt_pending_cmd *cmd;
3284 	int err;
3285 
3286 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3287 			       sizeof(*cp));
3288 	if (!cmd)
3289 		return -ENOMEM;
3290 
3291 	cmd->cmd_complete = addr_cmd_complete;
3292 
3293 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3294 			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3295 	if (err < 0)
3296 		mgmt_pending_remove(cmd);
3297 
3298 	return err;
3299 }
3300 
3301 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3302 			  u16 len)
3303 {
3304 	struct hci_conn *conn;
3305 	struct mgmt_cp_pin_code_reply *cp = data;
3306 	struct hci_cp_pin_code_reply reply;
3307 	struct mgmt_pending_cmd *cmd;
3308 	int err;
3309 
3310 	bt_dev_dbg(hdev, "sock %p", sk);
3311 
3312 	hci_dev_lock(hdev);
3313 
3314 	if (!hdev_is_powered(hdev)) {
3315 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3316 				      MGMT_STATUS_NOT_POWERED);
3317 		goto failed;
3318 	}
3319 
3320 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3321 	if (!conn) {
3322 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3323 				      MGMT_STATUS_NOT_CONNECTED);
3324 		goto failed;
3325 	}
3326 
3327 	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3328 		struct mgmt_cp_pin_code_neg_reply ncp;
3329 
3330 		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3331 
3332 		bt_dev_err(hdev, "PIN code is not 16 bytes long");
3333 
3334 		err = send_pin_code_neg_reply(sk, hdev, &ncp);
3335 		if (err >= 0)
3336 			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3337 					      MGMT_STATUS_INVALID_PARAMS);
3338 
3339 		goto failed;
3340 	}
3341 
3342 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3343 	if (!cmd) {
3344 		err = -ENOMEM;
3345 		goto failed;
3346 	}
3347 
3348 	cmd->cmd_complete = addr_cmd_complete;
3349 
3350 	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3351 	reply.pin_len = cp->pin_len;
3352 	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3353 
3354 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3355 	if (err < 0)
3356 		mgmt_pending_remove(cmd);
3357 
3358 failed:
3359 	hci_dev_unlock(hdev);
3360 	return err;
3361 }
3362 
3363 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3364 			     u16 len)
3365 {
3366 	struct mgmt_cp_set_io_capability *cp = data;
3367 
3368 	bt_dev_dbg(hdev, "sock %p", sk);
3369 
3370 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3371 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3372 				       MGMT_STATUS_INVALID_PARAMS);
3373 
3374 	hci_dev_lock(hdev);
3375 
3376 	hdev->io_capability = cp->io_capability;
3377 
3378 	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3379 
3380 	hci_dev_unlock(hdev);
3381 
3382 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3383 				 NULL, 0);
3384 }
3385 
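/* Find the pending Pair Device command, if any, whose user data is the
 * given connection.
 */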
3386 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3387 {
3388 	struct hci_dev *hdev = conn->hdev;
3389 	struct mgmt_pending_cmd *cmd;
3390 
3391 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3392 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3393 			continue;
3394 
3395 		if (cmd->user_data != conn)
3396 			continue;
3397 
3398 		return cmd;
3399 	}
3400 
3401 	return NULL;
3402 }
3403 
3404 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3405 {
3406 	struct mgmt_rp_pair_device rp;
3407 	struct hci_conn *conn = cmd->user_data;
3408 	int err;
3409 
3410 	bacpy(&rp.addr.bdaddr, &conn->dst);
3411 	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3412 
3413 	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3414 				status, &rp, sizeof(rp));
3415 
3416 	/* So we don't get further callbacks for this connection */
3417 	conn->connect_cfm_cb = NULL;
3418 	conn->security_cfm_cb = NULL;
3419 	conn->disconn_cfm_cb = NULL;
3420 
3421 	hci_conn_drop(conn);
3422 
3423 	/* The device is paired so there is no need to remove
3424 	 * its connection parameters anymore.
3425 	 */
3426 	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3427 
3428 	hci_conn_put(conn);
3429 
3430 	return err;
3431 }
3432 
3433 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3434 {
3435 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3436 	struct mgmt_pending_cmd *cmd;
3437 
3438 	cmd = find_pairing(conn);
3439 	if (cmd) {
3440 		cmd->cmd_complete(cmd, status);
3441 		mgmt_pending_remove(cmd);
3442 	}
3443 }
3444 
3445 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3446 {
3447 	struct mgmt_pending_cmd *cmd;
3448 
3449 	BT_DBG("status %u", status);
3450 
3451 	cmd = find_pairing(conn);
3452 	if (!cmd) {
3453 		BT_DBG("Unable to find a pending command");
3454 		return;
3455 	}
3456 
3457 	cmd->cmd_complete(cmd, mgmt_status(status));
3458 	mgmt_pending_remove(cmd);
3459 }
3460 
3461 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3462 {
3463 	struct mgmt_pending_cmd *cmd;
3464 
3465 	BT_DBG("status %u", status);
3466 
3467 	if (!status)
3468 		return;
3469 
3470 	cmd = find_pairing(conn);
3471 	if (!cmd) {
3472 		BT_DBG("Unable to find a pending command");
3473 		return;
3474 	}
3475 
3476 	cmd->cmd_complete(cmd, mgmt_status(status));
3477 	mgmt_pending_remove(cmd);
3478 }
3479 
3480 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3481 		       u16 len)
3482 {
3483 	struct mgmt_cp_pair_device *cp = data;
3484 	struct mgmt_rp_pair_device rp;
3485 	struct mgmt_pending_cmd *cmd;
3486 	u8 sec_level, auth_type;
3487 	struct hci_conn *conn;
3488 	int err;
3489 
3490 	bt_dev_dbg(hdev, "sock %p", sk);
3491 
3492 	memset(&rp, 0, sizeof(rp));
3493 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3494 	rp.addr.type = cp->addr.type;
3495 
3496 	if (!bdaddr_type_is_valid(cp->addr.type))
3497 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3498 					 MGMT_STATUS_INVALID_PARAMS,
3499 					 &rp, sizeof(rp));
3500 
3501 	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3502 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3503 					 MGMT_STATUS_INVALID_PARAMS,
3504 					 &rp, sizeof(rp));
3505 
3506 	hci_dev_lock(hdev);
3507 
3508 	if (!hdev_is_powered(hdev)) {
3509 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3510 					MGMT_STATUS_NOT_POWERED, &rp,
3511 					sizeof(rp));
3512 		goto unlock;
3513 	}
3514 
3515 	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3516 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3517 					MGMT_STATUS_ALREADY_PAIRED, &rp,
3518 					sizeof(rp));
3519 		goto unlock;
3520 	}
3521 
3522 	sec_level = BT_SECURITY_MEDIUM;
3523 	auth_type = HCI_AT_DEDICATED_BONDING;
3524 
3525 	if (cp->addr.type == BDADDR_BREDR) {
3526 		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3527 				       auth_type, CONN_REASON_PAIR_DEVICE,
3528 				       HCI_ACL_CONN_TIMEOUT);
3529 	} else {
3530 		u8 addr_type = le_addr_type(cp->addr.type);
3531 		struct hci_conn_params *p;
3532 
3533 		/* When pairing a new device, it is expected to remember
3534 		 * this device for future connections. Adding the connection
3535 		 * parameter information ahead of time allows tracking
3536 		 * of the peripheral preferred values and will speed up any
3537 		 * further connection establishment.
3538 		 *
3539 		 * If connection parameters already exist, then they
3540 		 * will be kept and this function does nothing.
3541 		 */
3542 		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3543 		if (!p) {
3544 			err = -EIO;
3545 			goto unlock;
3546 		}
3547 
3548 		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3549 			p->auto_connect = HCI_AUTO_CONN_DISABLED;
3550 
3551 		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3552 					   sec_level, HCI_LE_CONN_TIMEOUT,
3553 					   CONN_REASON_PAIR_DEVICE);
3554 	}
3555 
3556 	if (IS_ERR(conn)) {
3557 		int status;
3558 
3559 		if (PTR_ERR(conn) == -EBUSY)
3560 			status = MGMT_STATUS_BUSY;
3561 		else if (PTR_ERR(conn) == -EOPNOTSUPP)
3562 			status = MGMT_STATUS_NOT_SUPPORTED;
3563 		else if (PTR_ERR(conn) == -ECONNREFUSED)
3564 			status = MGMT_STATUS_REJECTED;
3565 		else
3566 			status = MGMT_STATUS_CONNECT_FAILED;
3567 
3568 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3569 					status, &rp, sizeof(rp));
3570 		goto unlock;
3571 	}
3572 
3573 	if (conn->connect_cfm_cb) {
3574 		hci_conn_drop(conn);
3575 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3576 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
3577 		goto unlock;
3578 	}
3579 
3580 	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3581 	if (!cmd) {
3582 		err = -ENOMEM;
3583 		hci_conn_drop(conn);
3584 		goto unlock;
3585 	}
3586 
3587 	cmd->cmd_complete = pairing_complete;
3588 
3589 	/* For LE, just connecting isn't proof that the pairing has finished */
3590 	if (cp->addr.type == BDADDR_BREDR) {
3591 		conn->connect_cfm_cb = pairing_complete_cb;
3592 		conn->security_cfm_cb = pairing_complete_cb;
3593 		conn->disconn_cfm_cb = pairing_complete_cb;
3594 	} else {
3595 		conn->connect_cfm_cb = le_pairing_complete_cb;
3596 		conn->security_cfm_cb = le_pairing_complete_cb;
3597 		conn->disconn_cfm_cb = le_pairing_complete_cb;
3598 	}
3599 
3600 	conn->io_capability = cp->io_cap;
3601 	cmd->user_data = hci_conn_get(conn);
3602 
3603 	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3604 	    hci_conn_security(conn, sec_level, auth_type, true)) {
3605 		cmd->cmd_complete(cmd, 0);
3606 		mgmt_pending_remove(cmd);
3607 	}
3608 
3609 	err = 0;
3610 
3611 unlock:
3612 	hci_dev_unlock(hdev);
3613 	return err;
3614 }
3615 
3616 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3617 			      u16 len)
3618 {
3619 	struct mgmt_addr_info *addr = data;
3620 	struct mgmt_pending_cmd *cmd;
3621 	struct hci_conn *conn;
3622 	int err;
3623 
3624 	bt_dev_dbg(hdev, "sock %p", sk);
3625 
3626 	hci_dev_lock(hdev);
3627 
3628 	if (!hdev_is_powered(hdev)) {
3629 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3630 				      MGMT_STATUS_NOT_POWERED);
3631 		goto unlock;
3632 	}
3633 
3634 	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3635 	if (!cmd) {
3636 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3637 				      MGMT_STATUS_INVALID_PARAMS);
3638 		goto unlock;
3639 	}
3640 
3641 	conn = cmd->user_data;
3642 
3643 	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3644 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3645 				      MGMT_STATUS_INVALID_PARAMS);
3646 		goto unlock;
3647 	}
3648 
3649 	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3650 	mgmt_pending_remove(cmd);
3651 
3652 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3653 				addr, sizeof(*addr));
3654 
3655 	/* Since the user doesn't want to proceed with the connection, abort
3656 	 * any ongoing pairing and then terminate the link if it was created
3657 	 * because of the Pair Device action.
3658 	 */
3659 	if (addr->type == BDADDR_BREDR)
3660 		hci_remove_link_key(hdev, &addr->bdaddr);
3661 	else
3662 		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3663 					      le_addr_type(addr->type));
3664 
3665 	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3666 		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3667 
3668 unlock:
3669 	hci_dev_unlock(hdev);
3670 	return err;
3671 }
3672 
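/* Common handler for the user confirmation and passkey replies, both
 * positive and negative: LE pairing responses are routed through SMP,
 * while BR/EDR responses are sent as HCI commands.
 */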
3673 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3674 			     struct mgmt_addr_info *addr, u16 mgmt_op,
3675 			     u16 hci_op, __le32 passkey)
3676 {
3677 	struct mgmt_pending_cmd *cmd;
3678 	struct hci_conn *conn;
3679 	int err;
3680 
3681 	hci_dev_lock(hdev);
3682 
3683 	if (!hdev_is_powered(hdev)) {
3684 		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3685 					MGMT_STATUS_NOT_POWERED, addr,
3686 					sizeof(*addr));
3687 		goto done;
3688 	}
3689 
3690 	if (addr->type == BDADDR_BREDR)
3691 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3692 	else
3693 		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3694 					       le_addr_type(addr->type));
3695 
3696 	if (!conn) {
3697 		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3698 					MGMT_STATUS_NOT_CONNECTED, addr,
3699 					sizeof(*addr));
3700 		goto done;
3701 	}
3702 
3703 	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3704 		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3705 		if (!err)
3706 			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3707 						MGMT_STATUS_SUCCESS, addr,
3708 						sizeof(*addr));
3709 		else
3710 			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3711 						MGMT_STATUS_FAILED, addr,
3712 						sizeof(*addr));
3713 
3714 		goto done;
3715 	}
3716 
3717 	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3718 	if (!cmd) {
3719 		err = -ENOMEM;
3720 		goto done;
3721 	}
3722 
3723 	cmd->cmd_complete = addr_cmd_complete;
3724 
3725 	/* Continue with pairing via HCI */
3726 	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3727 		struct hci_cp_user_passkey_reply cp;
3728 
3729 		bacpy(&cp.bdaddr, &addr->bdaddr);
3730 		cp.passkey = passkey;
3731 		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3732 	} else {
3733 		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3734 				   &addr->bdaddr);
	}
3735 
3736 	if (err < 0)
3737 		mgmt_pending_remove(cmd);
3738 
3739 done:
3740 	hci_dev_unlock(hdev);
3741 	return err;
3742 }
3743 
3744 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3745 			      void *data, u16 len)
3746 {
3747 	struct mgmt_cp_pin_code_neg_reply *cp = data;
3748 
3749 	bt_dev_dbg(hdev, "sock %p", sk);
3750 
3751 	return user_pairing_resp(sk, hdev, &cp->addr,
3752 				MGMT_OP_PIN_CODE_NEG_REPLY,
3753 				HCI_OP_PIN_CODE_NEG_REPLY, 0);
3754 }
3755 
3756 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3757 			      u16 len)
3758 {
3759 	struct mgmt_cp_user_confirm_reply *cp = data;
3760 
3761 	bt_dev_dbg(hdev, "sock %p", sk);
3762 
3763 	if (len != sizeof(*cp))
3764 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3765 				       MGMT_STATUS_INVALID_PARAMS);
3766 
3767 	return user_pairing_resp(sk, hdev, &cp->addr,
3768 				 MGMT_OP_USER_CONFIRM_REPLY,
3769 				 HCI_OP_USER_CONFIRM_REPLY, 0);
3770 }
3771 
3772 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3773 				  void *data, u16 len)
3774 {
3775 	struct mgmt_cp_user_confirm_neg_reply *cp = data;
3776 
3777 	bt_dev_dbg(hdev, "sock %p", sk);
3778 
3779 	return user_pairing_resp(sk, hdev, &cp->addr,
3780 				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3781 				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3782 }
3783 
3784 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3785 			      u16 len)
3786 {
3787 	struct mgmt_cp_user_passkey_reply *cp = data;
3788 
3789 	bt_dev_dbg(hdev, "sock %p", sk);
3790 
3791 	return user_pairing_resp(sk, hdev, &cp->addr,
3792 				 MGMT_OP_USER_PASSKEY_REPLY,
3793 				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3794 }
3795 
3796 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3797 				  void *data, u16 len)
3798 {
3799 	struct mgmt_cp_user_passkey_neg_reply *cp = data;
3800 
3801 	bt_dev_dbg(hdev, "sock %p", sk);
3802 
3803 	return user_pairing_resp(sk, hdev, &cp->addr,
3804 				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3805 				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3806 }
3807 
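/* If the current advertising instance carries any of the given flags,
 * expire it early and schedule the next instance so that the changed
 * data is picked up.
 */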
3808 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3809 {
3810 	struct adv_info *adv_instance;
3811 
3812 	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3813 	if (!adv_instance)
3814 		return 0;
3815 
3816 	/* Stop if the current instance doesn't need to be changed */
3817 	if (!(adv_instance->flags & flags))
3818 		return 0;
3819 
3820 	cancel_adv_timeout(hdev);
3821 
3822 	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3823 	if (!adv_instance)
3824 		return 0;
3825 
3826 	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3827 
3828 	return 0;
3829 }
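
/* A name or appearance change invalidates the data of the advertising
 * instance that carries the corresponding MGMT_ADV_FLAG_*: if the current
 * instance does, adv_expire_sync() cancels its timeout and schedules the
 * next instance so stale data is not re-advertised.
 */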
3830 
3831 static int name_changed_sync(struct hci_dev *hdev, void *data)
3832 {
3833 	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3834 }
3835 
3836 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3837 {
3838 	struct mgmt_pending_cmd *cmd = data;
3839 	struct mgmt_cp_set_local_name *cp = cmd->param;
3840 	u8 status = mgmt_status(err);
3841 
3842 	bt_dev_dbg(hdev, "err %d", err);
3843 
3844 	if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3845 		return;
3846 
3847 	if (status) {
3848 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3849 				status);
3850 	} else {
3851 		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3852 				  cp, sizeof(*cp));
3853 
3854 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3855 			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3856 	}
3857 
3858 	mgmt_pending_remove(cmd);
3859 }
3860 
3861 static int set_name_sync(struct hci_dev *hdev, void *data)
3862 {
3863 	if (lmp_bredr_capable(hdev)) {
3864 		hci_update_name_sync(hdev);
3865 		hci_update_eir_sync(hdev);
3866 	}
3867 
3868 	/* The name is stored in the scan response data, so there is
3869 	 * no need to update the advertising data here.
3870 	 */
3871 	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3872 		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3873 
3874 	return 0;
3875 }
3876 
3877 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3878 			  u16 len)
3879 {
3880 	struct mgmt_cp_set_local_name *cp = data;
3881 	struct mgmt_pending_cmd *cmd;
3882 	int err;
3883 
3884 	bt_dev_dbg(hdev, "sock %p", sk);
3885 
3886 	hci_dev_lock(hdev);
3887 
3888 	/* If the old values are the same as the new ones, just return a
3889 	 * direct command complete event.
3890 	 */
3891 	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3892 	    !memcmp(hdev->short_name, cp->short_name,
3893 		    sizeof(hdev->short_name))) {
3894 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3895 					data, len);
3896 		goto failed;
3897 	}
3898 
3899 	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3900 
3901 	if (!hdev_is_powered(hdev)) {
3902 		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3903 
3904 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3905 					data, len);
3906 		if (err < 0)
3907 			goto failed;
3908 
3909 		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3910 					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3911 		ext_info_changed(hdev, sk);
3912 
3913 		goto failed;
3914 	}
3915 
3916 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3917 	if (!cmd)
3918 		err = -ENOMEM;
3919 	else
3920 		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3921 					 set_name_complete);
3922 
3923 	if (err < 0) {
3924 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3925 				      MGMT_STATUS_FAILED);
3926 
3927 		if (cmd)
3928 			mgmt_pending_remove(cmd);
3929 
3930 		goto failed;
3931 	}
3932 
3933 	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3934 
3935 failed:
3936 	hci_dev_unlock(hdev);
3937 	return err;
3938 }
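
/* set_local_name() follows the common asynchronous mgmt shape: register a
 * pending command, queue the work with hci_cmd_sync_queue() and resolve
 * the command from the completion callback. A minimal sketch of a new
 * handler built the same way (hypothetical MGMT_OP_FOO/foo_* names):
 *
 *	cmd = mgmt_pending_add(sk, MGMT_OP_FOO, hdev, data, len);
 *	if (!cmd)
 *		err = -ENOMEM;
 *	else
 *		err = hci_cmd_sync_queue(hdev, foo_sync, cmd, foo_complete);
 *	if (err < 0 && cmd)
 *		mgmt_pending_remove(cmd);
 */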
3939 
3940 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3941 {
3942 	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3943 }
3944 
3945 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3946 			  u16 len)
3947 {
3948 	struct mgmt_cp_set_appearance *cp = data;
3949 	u16 appearance;
3950 	int err;
3951 
3952 	bt_dev_dbg(hdev, "sock %p", sk);
3953 
3954 	if (!lmp_le_capable(hdev))
3955 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3956 				       MGMT_STATUS_NOT_SUPPORTED);
3957 
3958 	appearance = le16_to_cpu(cp->appearance);
3959 
3960 	hci_dev_lock(hdev);
3961 
3962 	if (hdev->appearance != appearance) {
3963 		hdev->appearance = appearance;
3964 
3965 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3966 			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3967 					   NULL);
3968 
3969 		ext_info_changed(hdev, sk);
3970 	}
3971 
3972 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3973 				0);
3974 
3975 	hci_dev_unlock(hdev);
3976 
3977 	return err;
3978 }
3979 
3980 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3981 				 void *data, u16 len)
3982 {
3983 	struct mgmt_rp_get_phy_configuration rp;
3984 
3985 	bt_dev_dbg(hdev, "sock %p", sk);
3986 
3987 	hci_dev_lock(hdev);
3988 
3989 	memset(&rp, 0, sizeof(rp));
3990 
3991 	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3992 	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3993 	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3994 
3995 	hci_dev_unlock(hdev);
3996 
3997 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3998 				 &rp, sizeof(rp));
3999 }
4000 
4001 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
4002 {
4003 	struct mgmt_ev_phy_configuration_changed ev;
4004 
4005 	memset(&ev, 0, sizeof(ev));
4006 
4007 	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
4008 
4009 	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
4010 			  sizeof(ev), skip);
4011 }
4012 
4013 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
4014 {
4015 	struct mgmt_pending_cmd *cmd = data;
4016 	struct sk_buff *skb = cmd->skb;
4017 	u8 status = mgmt_status(err);
4018 
4019 	if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
4020 		return;
4021 
4022 	if (!status) {
4023 		if (!skb)
4024 			status = MGMT_STATUS_FAILED;
4025 		else if (IS_ERR(skb))
4026 			status = mgmt_status(PTR_ERR(skb));
4027 		else
4028 			status = mgmt_status(skb->data[0]);
4029 	}
4030 
4031 	bt_dev_dbg(hdev, "status %d", status);
4032 
4033 	if (status) {
4034 		mgmt_cmd_status(cmd->sk, hdev->id,
4035 				MGMT_OP_SET_PHY_CONFIGURATION, status);
4036 	} else {
4037 		mgmt_cmd_complete(cmd->sk, hdev->id,
4038 				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
4039 				  NULL, 0);
4040 
4041 		mgmt_phy_configuration_changed(hdev, cmd->sk);
4042 	}
4043 
4044 	if (skb && !IS_ERR(skb))
4045 		kfree_skb(skb);
4046 
4047 	mgmt_pending_remove(cmd);
4048 }
4049 
4050 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4051 {
4052 	struct mgmt_pending_cmd *cmd = data;
4053 	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4054 	struct hci_cp_le_set_default_phy cp_phy;
4055 	u32 selected_phys = __le32_to_cpu(cp->selected_phys);
4056 
4057 	memset(&cp_phy, 0, sizeof(cp_phy));
4058 
4059 	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4060 		cp_phy.all_phys |= 0x01;
4061 
4062 	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4063 		cp_phy.all_phys |= 0x02;
4064 
4065 	if (selected_phys & MGMT_PHY_LE_1M_TX)
4066 		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4067 
4068 	if (selected_phys & MGMT_PHY_LE_2M_TX)
4069 		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4070 
4071 	if (selected_phys & MGMT_PHY_LE_CODED_TX)
4072 		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4073 
4074 	if (selected_phys & MGMT_PHY_LE_1M_RX)
4075 		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4076 
4077 	if (selected_phys & MGMT_PHY_LE_2M_RX)
4078 		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4079 
4080 	if (selected_phys & MGMT_PHY_LE_CODED_RX)
4081 		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4082 
4083 	cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4084 				   sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4085 
4086 	return 0;
4087 }
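
/* Worked example of the mapping above: selected_phys = MGMT_PHY_LE_1M_TX |
 * MGMT_PHY_LE_2M_TX | MGMT_PHY_LE_1M_RX yields all_phys = 0x00 (both
 * directions carry explicit preferences), tx_phys = HCI_LE_SET_PHY_1M |
 * HCI_LE_SET_PHY_2M and rx_phys = HCI_LE_SET_PHY_1M. Selecting no LE RX
 * PHYs at all instead sets all_phys bit 0x02, i.e. "no RX preference".
 */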
4088 
4089 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4090 				 void *data, u16 len)
4091 {
4092 	struct mgmt_cp_set_phy_configuration *cp = data;
4093 	struct mgmt_pending_cmd *cmd;
4094 	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
4095 	u16 pkt_type = (HCI_DH1 | HCI_DM1);
4096 	bool changed = false;
4097 	int err;
4098 
4099 	bt_dev_dbg(hdev, "sock %p", sk);
4100 
4101 	configurable_phys = get_configurable_phys(hdev);
4102 	supported_phys = get_supported_phys(hdev);
4103 	selected_phys = __le32_to_cpu(cp->selected_phys);
4104 
4105 	if (selected_phys & ~supported_phys)
4106 		return mgmt_cmd_status(sk, hdev->id,
4107 				       MGMT_OP_SET_PHY_CONFIGURATION,
4108 				       MGMT_STATUS_INVALID_PARAMS);
4109 
4110 	unconfigure_phys = supported_phys & ~configurable_phys;
4111 
4112 	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4113 		return mgmt_cmd_status(sk, hdev->id,
4114 				       MGMT_OP_SET_PHY_CONFIGURATION,
4115 				       MGMT_STATUS_INVALID_PARAMS);
4116 
4117 	if (selected_phys == get_selected_phys(hdev))
4118 		return mgmt_cmd_complete(sk, hdev->id,
4119 					 MGMT_OP_SET_PHY_CONFIGURATION,
4120 					 0, NULL, 0);
4121 
4122 	hci_dev_lock(hdev);
4123 
4124 	if (!hdev_is_powered(hdev)) {
4125 		err = mgmt_cmd_status(sk, hdev->id,
4126 				      MGMT_OP_SET_PHY_CONFIGURATION,
4127 				      MGMT_STATUS_REJECTED);
4128 		goto unlock;
4129 	}
4130 
4131 	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4132 		err = mgmt_cmd_status(sk, hdev->id,
4133 				      MGMT_OP_SET_PHY_CONFIGURATION,
4134 				      MGMT_STATUS_BUSY);
4135 		goto unlock;
4136 	}
4137 
4138 	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4139 		pkt_type |= (HCI_DH3 | HCI_DM3);
4140 	else
4141 		pkt_type &= ~(HCI_DH3 | HCI_DM3);
4142 
4143 	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4144 		pkt_type |= (HCI_DH5 | HCI_DM5);
4145 	else
4146 		pkt_type &= ~(HCI_DH5 | HCI_DM5);
4147 
4148 	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4149 		pkt_type &= ~HCI_2DH1;
4150 	else
4151 		pkt_type |= HCI_2DH1;
4152 
4153 	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4154 		pkt_type &= ~HCI_2DH3;
4155 	else
4156 		pkt_type |= HCI_2DH3;
4157 
4158 	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4159 		pkt_type &= ~HCI_2DH5;
4160 	else
4161 		pkt_type |= HCI_2DH5;
4162 
4163 	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4164 		pkt_type &= ~HCI_3DH1;
4165 	else
4166 		pkt_type |= HCI_3DH1;
4167 
4168 	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4169 		pkt_type &= ~HCI_3DH3;
4170 	else
4171 		pkt_type |= HCI_3DH3;
4172 
4173 	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4174 		pkt_type &= ~HCI_3DH5;
4175 	else
4176 		pkt_type |= HCI_3DH5;
4177 
4178 	if (pkt_type != hdev->pkt_type) {
4179 		hdev->pkt_type = pkt_type;
4180 		changed = true;
4181 	}
4182 
4183 	if ((selected_phys & MGMT_PHY_LE_MASK) ==
4184 	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4185 		if (changed)
4186 			mgmt_phy_configuration_changed(hdev, sk);
4187 
4188 		err = mgmt_cmd_complete(sk, hdev->id,
4189 					MGMT_OP_SET_PHY_CONFIGURATION,
4190 					0, NULL, 0);
4191 
4192 		goto unlock;
4193 	}
4194 
4195 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4196 			       len);
4197 	if (!cmd)
4198 		err = -ENOMEM;
4199 	else
4200 		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4201 					 set_default_phy_complete);
4202 
4203 	if (err < 0) {
4204 		err = mgmt_cmd_status(sk, hdev->id,
4205 				      MGMT_OP_SET_PHY_CONFIGURATION,
4206 				      MGMT_STATUS_FAILED);
4207 
4208 		if (cmd)
4209 			mgmt_pending_remove(cmd);
4210 	}
4211 
4212 unlock:
4213 	hci_dev_unlock(hdev);
4214 
4215 	return err;
4216 }
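
/* Note the asymmetry in the pkt_type handling above: the BR 1M bits
 * (HCI_DH1/HCI_DM1 etc.) are "may use" packet types, while the EDR bits
 * (HCI_2DHx/HCI_3DHx) are "shall not use" flags, which is why selecting
 * e.g. MGMT_PHY_EDR_2M_1SLOT *clears* HCI_2DH1. Selecting only the
 * mandatory BR 1M 1-slot PHY therefore leaves pkt_type as
 * HCI_DH1 | HCI_DM1 with all six 2DHx/3DHx bits set, i.e. every EDR rate
 * disabled.
 */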
4217 
4218 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4219 			    u16 len)
4220 {
4221 	int err = MGMT_STATUS_SUCCESS;
4222 	struct mgmt_cp_set_blocked_keys *keys = data;
4223 	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4224 				   sizeof(struct mgmt_blocked_key_info));
4225 	u16 key_count, expected_len;
4226 	int i;
4227 
4228 	bt_dev_dbg(hdev, "sock %p", sk);
4229 
4230 	key_count = __le16_to_cpu(keys->key_count);
4231 	if (key_count > max_key_count) {
4232 		bt_dev_err(hdev, "too big key_count value %u", key_count);
4233 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4234 				       MGMT_STATUS_INVALID_PARAMS);
4235 	}
4236 
4237 	expected_len = struct_size(keys, keys, key_count);
4238 	if (expected_len != len) {
4239 		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4240 			   expected_len, len);
4241 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4242 				       MGMT_STATUS_INVALID_PARAMS);
4243 	}
4244 
4245 	hci_dev_lock(hdev);
4246 
4247 	hci_blocked_keys_clear(hdev);
4248 
4249 	for (i = 0; i < key_count; ++i) {
4250 		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4251 
4252 		if (!b) {
4253 			err = MGMT_STATUS_NO_RESOURCES;
4254 			break;
4255 		}
4256 
4257 		b->type = keys->keys[i].type;
4258 		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4259 		list_add_rcu(&b->list, &hdev->blocked_keys);
4260 	}
4261 	hci_dev_unlock(hdev);
4262 
4263 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4264 				err, NULL, 0);
4265 }
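
/* Worked example of the length check above: for key_count == 2 the only
 * accepted len is struct_size(keys, keys, 2), i.e. sizeof(*keys) plus two
 * struct mgmt_blocked_key_info entries; anything else is rejected as
 * MGMT_STATUS_INVALID_PARAMS before any key is parsed.
 */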
4266 
4267 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4268 			       void *data, u16 len)
4269 {
4270 	struct mgmt_mode *cp = data;
4271 	int err;
4272 	bool changed = false;
4273 
4274 	bt_dev_dbg(hdev, "sock %p", sk);
4275 
4276 	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
4277 		return mgmt_cmd_status(sk, hdev->id,
4278 				       MGMT_OP_SET_WIDEBAND_SPEECH,
4279 				       MGMT_STATUS_NOT_SUPPORTED);
4280 
4281 	if (cp->val != 0x00 && cp->val != 0x01)
4282 		return mgmt_cmd_status(sk, hdev->id,
4283 				       MGMT_OP_SET_WIDEBAND_SPEECH,
4284 				       MGMT_STATUS_INVALID_PARAMS);
4285 
4286 	hci_dev_lock(hdev);
4287 
4288 	if (hdev_is_powered(hdev) &&
4289 	    !!cp->val != hci_dev_test_flag(hdev,
4290 					   HCI_WIDEBAND_SPEECH_ENABLED)) {
4291 		err = mgmt_cmd_status(sk, hdev->id,
4292 				      MGMT_OP_SET_WIDEBAND_SPEECH,
4293 				      MGMT_STATUS_REJECTED);
4294 		goto unlock;
4295 	}
4296 
4297 	if (cp->val)
4298 		changed = !hci_dev_test_and_set_flag(hdev,
4299 						   HCI_WIDEBAND_SPEECH_ENABLED);
4300 	else
4301 		changed = hci_dev_test_and_clear_flag(hdev,
4302 						   HCI_WIDEBAND_SPEECH_ENABLED);
4303 
4304 	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
4305 	if (err < 0)
4306 		goto unlock;
4307 
4308 	if (changed)
4309 		err = new_settings(hdev, sk);
4310 
4311 unlock:
4312 	hci_dev_unlock(hdev);
4313 	return err;
4314 }
4315 
4316 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4317 			       void *data, u16 data_len)
4318 {
4319 	char buf[20];
4320 	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4321 	u16 cap_len = 0;
4322 	u8 flags = 0;
4323 	u8 tx_power_range[2];
4324 
4325 	bt_dev_dbg(hdev, "sock %p", sk);
4326 
4327 	memset(&buf, 0, sizeof(buf));
4328 
4329 	hci_dev_lock(hdev);
4330 
4331 	/* When the Read Simple Pairing Options command is supported, then
4332 	 * the remote public key validation is supported.
4333 	 *
4334 	 * Alternatively, when Microsoft extensions are available, they can
4335 	 * indicate support for public key validation as well.
4336 	 */
4337 	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4338 		flags |= 0x01;	/* Remote public key validation (BR/EDR) */
4339 
4340 	flags |= 0x02;		/* Remote public key validation (LE) */
4341 
4342 	/* When the Read Encryption Key Size command is supported, then the
4343 	 * encryption key size is enforced.
4344 	 */
4345 	if (hdev->commands[20] & 0x10)
4346 		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */
4347 
4348 	flags |= 0x08;		/* Encryption key size enforcement (LE) */
4349 
4350 	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4351 				  &flags, 1);
4352 
4353 	/* When the Read Simple Pairing Options command is supported, the
4354 	 * maximum encryption key size information is also provided.
4355 	 */
4356 	if (hdev->commands[41] & 0x08)
4357 		cap_len = eir_append_le16(rp->cap, cap_len,
4358 					  MGMT_CAP_MAX_ENC_KEY_SIZE,
4359 					  hdev->max_enc_key_size);
4360 
4361 	cap_len = eir_append_le16(rp->cap, cap_len,
4362 				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4363 				  SMP_MAX_ENC_KEY_SIZE);
4364 
4365 	/* Append the min/max LE tx power parameters if we were able to fetch
4366 	 * them from the controller.
4367 	 */
4368 	if (hdev->commands[38] & 0x80) {
4369 		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4370 		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4371 		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4372 					  tx_power_range, 2);
4373 	}
4374 
4375 	rp->cap_len = cpu_to_le16(cap_len);
4376 
4377 	hci_dev_unlock(hdev);
4378 
4379 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4380 				 rp, sizeof(*rp) + cap_len);
4381 }
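
/* The capability reply is built from EIR-style TLV entries: each
 * eir_append_*() call emits { length, type, value } where length counts
 * the type byte plus the value. The one-byte security flags above, for
 * instance, become { 0x02, MGMT_CAP_SEC_FLAGS, flags } and advance
 * cap_len by three.
 */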
4382 
4383 #ifdef CONFIG_BT_FEATURE_DEBUG
4384 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4385 static const u8 debug_uuid[16] = {
4386 	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4387 	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4388 };
4389 #endif
4390 
4391 /* 330859bc-7506-492d-9370-9a6f0614037f */
4392 static const u8 quality_report_uuid[16] = {
4393 	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4394 	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4395 };
4396 
4397 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4398 static const u8 offload_codecs_uuid[16] = {
4399 	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4400 	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4401 };
4402 
4403 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4404 static const u8 le_simultaneous_roles_uuid[16] = {
4405 	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4406 	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4407 };
4408 
4409 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
4410 static const u8 rpa_resolution_uuid[16] = {
4411 	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
4412 	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
4413 };
4414 
4415 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4416 static const u8 iso_socket_uuid[16] = {
4417 	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4418 	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4419 };
4420 
4421 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4422 static const u8 mgmt_mesh_uuid[16] = {
4423 	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4424 	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
4425 };
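
/* All of the experimental-feature UUIDs above are stored byte-reversed
 * (little endian) relative to their canonical string form, e.g.
 * d4992530-b9ec-469f-ab01-6c481c47da1c starts with 0x1c, 0xda, ... so
 * that a plain memcmp() against the UUID received in the command
 * parameters works without conversion.
 */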
4426 
4427 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4428 				  void *data, u16 data_len)
4429 {
4430 	struct mgmt_rp_read_exp_features_info *rp;
4431 	size_t len;
4432 	u16 idx = 0;
4433 	u32 flags;
4434 	int status;
4435 
4436 	bt_dev_dbg(hdev, "sock %p", sk);
4437 
4438 	/* Enough space for 7 features */
4439 	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4440 	rp = kzalloc(len, GFP_KERNEL);
4441 	if (!rp)
4442 		return -ENOMEM;
4443 
4444 #ifdef CONFIG_BT_FEATURE_DEBUG
4445 	if (!hdev) {
4446 		flags = bt_dbg_get() ? BIT(0) : 0;
4447 
4448 		memcpy(rp->features[idx].uuid, debug_uuid, 16);
4449 		rp->features[idx].flags = cpu_to_le32(flags);
4450 		idx++;
4451 	}
4452 #endif
4453 
4454 	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4455 		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4456 			flags = BIT(0);
4457 		else
4458 			flags = 0;
4459 
4460 		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4461 		rp->features[idx].flags = cpu_to_le32(flags);
4462 		idx++;
4463 	}
4464 
4465 	if (hdev && ll_privacy_capable(hdev)) {
4466 		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
4467 			flags = BIT(0) | BIT(1);
4468 		else
4469 			flags = BIT(1);
4470 
4471 		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4472 		rp->features[idx].flags = cpu_to_le32(flags);
4473 		idx++;
4474 	}
4475 
4476 	if (hdev && (aosp_has_quality_report(hdev) ||
4477 		     hdev->set_quality_report)) {
4478 		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4479 			flags = BIT(0);
4480 		else
4481 			flags = 0;
4482 
4483 		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4484 		rp->features[idx].flags = cpu_to_le32(flags);
4485 		idx++;
4486 	}
4487 
4488 	if (hdev && hdev->get_data_path_id) {
4489 		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4490 			flags = BIT(0);
4491 		else
4492 			flags = 0;
4493 
4494 		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4495 		rp->features[idx].flags = cpu_to_le32(flags);
4496 		idx++;
4497 	}
4498 
4499 	if (IS_ENABLED(CONFIG_BT_LE)) {
4500 		flags = iso_enabled() ? BIT(0) : 0;
4501 		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4502 		rp->features[idx].flags = cpu_to_le32(flags);
4503 		idx++;
4504 	}
4505 
4506 	if (hdev && lmp_le_capable(hdev)) {
4507 		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4508 			flags = BIT(0);
4509 		else
4510 			flags = 0;
4511 
4512 		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4513 		rp->features[idx].flags = cpu_to_le32(flags);
4514 		idx++;
4515 	}
4516 
4517 	rp->feature_count = cpu_to_le16(idx);
4518 
4519 	/* After reading the experimental features information, enable
4520 	 * the events to update the client on any future change.
4521 	 */
4522 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4523 
4524 	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4525 				   MGMT_OP_READ_EXP_FEATURES_INFO,
4526 				   0, rp, sizeof(*rp) + (20 * idx));
4527 
4528 	kfree(rp);
4529 	return status;
4530 }
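
/* Each feature entry is 20 bytes on the wire (a 16 byte UUID plus a
 * 32 bit flags word), which is why the reply length above is
 * sizeof(*rp) + (20 * idx) and the buffer allocated at the top reserves
 * room for seven such entries.
 */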
4531 
4532 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4533 					  struct sock *skip)
4534 {
4535 	struct mgmt_ev_exp_feature_changed ev;
4536 
4537 	memset(&ev, 0, sizeof(ev));
4538 	memcpy(ev.uuid, rpa_resolution_uuid, 16);
4539 	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4540 
4541 	/* TODO: check whether updating conn_flags needs to be atomic here */
4542 	if (enabled && privacy_mode_capable(hdev))
4543 		hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4544 	else
4545 		hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
4546 
4547 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4548 				  &ev, sizeof(ev),
4549 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4551 }
4552 
4553 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4554 			       bool enabled, struct sock *skip)
4555 {
4556 	struct mgmt_ev_exp_feature_changed ev;
4557 
4558 	memset(&ev, 0, sizeof(ev));
4559 	memcpy(ev.uuid, uuid, 16);
4560 	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4561 
4562 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4563 				  &ev, sizeof(ev),
4564 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4565 }
4566 
4567 #define EXP_FEAT(_uuid, _set_func)	\
4568 {					\
4569 	.uuid = _uuid,			\
4570 	.set_func = _set_func,		\
4571 }
4572 
4573 /* The zero key uuid is special. Multiple exp features are set through it. */
4574 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4575 			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4576 {
4577 	struct mgmt_rp_set_exp_feature rp;
4578 
4579 	memset(rp.uuid, 0, 16);
4580 	rp.flags = cpu_to_le32(0);
4581 
4582 #ifdef CONFIG_BT_FEATURE_DEBUG
4583 	if (!hdev) {
4584 		bool changed = bt_dbg_get();
4585 
4586 		bt_dbg_set(false);
4587 
4588 		if (changed)
4589 			exp_feature_changed(NULL, ZERO_KEY, false, sk);
4590 	}
4591 #endif
4592 
4593 	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
4594 		bool changed;
4595 
4596 		changed = hci_dev_test_and_clear_flag(hdev,
4597 						      HCI_ENABLE_LL_PRIVACY);
4598 		if (changed)
4599 			exp_feature_changed(hdev, rpa_resolution_uuid, false,
4600 					    sk);
4601 	}
4602 
4603 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4604 
4605 	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4606 				 MGMT_OP_SET_EXP_FEATURE, 0,
4607 				 &rp, sizeof(rp));
4608 }
4609 
4610 #ifdef CONFIG_BT_FEATURE_DEBUG
4611 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4612 			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4613 {
4614 	struct mgmt_rp_set_exp_feature rp;
4615 
4616 	bool val, changed;
4617 	int err;
4618 
4619 	/* This command requires the non-controller index */
4620 	if (hdev)
4621 		return mgmt_cmd_status(sk, hdev->id,
4622 				       MGMT_OP_SET_EXP_FEATURE,
4623 				       MGMT_STATUS_INVALID_INDEX);
4624 
4625 	/* Parameters are limited to a single octet */
4626 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4627 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4628 				       MGMT_OP_SET_EXP_FEATURE,
4629 				       MGMT_STATUS_INVALID_PARAMS);
4630 
4631 	/* Only boolean on/off is supported */
4632 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4633 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4634 				       MGMT_OP_SET_EXP_FEATURE,
4635 				       MGMT_STATUS_INVALID_PARAMS);
4636 
4637 	val = !!cp->param[0];
4638 	changed = val ? !bt_dbg_get() : bt_dbg_get();
4639 	bt_dbg_set(val);
4640 
4641 	memcpy(rp.uuid, debug_uuid, 16);
4642 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4643 
4644 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4645 
4646 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4647 				MGMT_OP_SET_EXP_FEATURE, 0,
4648 				&rp, sizeof(rp));
4649 
4650 	if (changed)
4651 		exp_feature_changed(hdev, debug_uuid, val, sk);
4652 
4653 	return err;
4654 }
4655 #endif
4656 
4657 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4658 			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4659 {
4660 	struct mgmt_rp_set_exp_feature rp;
4661 	bool val, changed;
4662 	int err;
4663 
4664 	/* This command requires a controller index */
4665 	if (!hdev)
4666 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4667 				       MGMT_OP_SET_EXP_FEATURE,
4668 				       MGMT_STATUS_INVALID_INDEX);
4669 
4670 	/* Parameters are limited to a single octet */
4671 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4672 		return mgmt_cmd_status(sk, hdev->id,
4673 				       MGMT_OP_SET_EXP_FEATURE,
4674 				       MGMT_STATUS_INVALID_PARAMS);
4675 
4676 	/* Only boolean on/off is supported */
4677 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4678 		return mgmt_cmd_status(sk, hdev->id,
4679 				       MGMT_OP_SET_EXP_FEATURE,
4680 				       MGMT_STATUS_INVALID_PARAMS);
4681 
4682 	val = !!cp->param[0];
4683 
4684 	if (val) {
4685 		changed = !hci_dev_test_and_set_flag(hdev,
4686 						     HCI_MESH_EXPERIMENTAL);
4687 	} else {
4688 		hci_dev_clear_flag(hdev, HCI_MESH);
4689 		changed = hci_dev_test_and_clear_flag(hdev,
4690 						      HCI_MESH_EXPERIMENTAL);
4691 	}
4692 
4693 	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4694 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4695 
4696 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4697 
4698 	err = mgmt_cmd_complete(sk, hdev->id,
4699 				MGMT_OP_SET_EXP_FEATURE, 0,
4700 				&rp, sizeof(rp));
4701 
4702 	if (changed)
4703 		exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4704 
4705 	return err;
4706 }
4707 
4708 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4709 				   struct mgmt_cp_set_exp_feature *cp,
4710 				   u16 data_len)
4711 {
4712 	struct mgmt_rp_set_exp_feature rp;
4713 	bool val, changed;
4714 	int err;
4715 	u32 flags;
4716 
4717 	/* This command requires a controller index */
4718 	if (!hdev)
4719 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4720 				       MGMT_OP_SET_EXP_FEATURE,
4721 				       MGMT_STATUS_INVALID_INDEX);
4722 
4723 	/* Changes can only be made when controller is powered down */
4724 	if (hdev_is_powered(hdev))
4725 		return mgmt_cmd_status(sk, hdev->id,
4726 				       MGMT_OP_SET_EXP_FEATURE,
4727 				       MGMT_STATUS_REJECTED);
4728 
4729 	/* Parameters are limited to a single octet */
4730 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4731 		return mgmt_cmd_status(sk, hdev->id,
4732 				       MGMT_OP_SET_EXP_FEATURE,
4733 				       MGMT_STATUS_INVALID_PARAMS);
4734 
4735 	/* Only boolean on/off is supported */
4736 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4737 		return mgmt_cmd_status(sk, hdev->id,
4738 				       MGMT_OP_SET_EXP_FEATURE,
4739 				       MGMT_STATUS_INVALID_PARAMS);
4740 
4741 	val = !!cp->param[0];
4742 
4743 	if (val) {
4744 		changed = !hci_dev_test_and_set_flag(hdev,
4745 						     HCI_ENABLE_LL_PRIVACY);
4746 		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4747 
4748 		/* Enable LL privacy + supported settings changed */
4749 		flags = BIT(0) | BIT(1);
4750 	} else {
4751 		changed = hci_dev_test_and_clear_flag(hdev,
4752 						      HCI_ENABLE_LL_PRIVACY);
4753 
4754 		/* Disable LL privacy + supported settings changed */
4755 		flags = BIT(1);
4756 	}
4757 
4758 	memcpy(rp.uuid, rpa_resolution_uuid, 16);
4759 	rp.flags = cpu_to_le32(flags);
4760 
4761 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4762 
4763 	err = mgmt_cmd_complete(sk, hdev->id,
4764 				MGMT_OP_SET_EXP_FEATURE, 0,
4765 				&rp, sizeof(rp));
4766 
4767 	if (changed)
4768 		exp_ll_privacy_feature_changed(val, hdev, sk);
4769 
4770 	return err;
4771 }
4772 
4773 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4774 				   struct mgmt_cp_set_exp_feature *cp,
4775 				   u16 data_len)
4776 {
4777 	struct mgmt_rp_set_exp_feature rp;
4778 	bool val, changed;
4779 	int err;
4780 
4781 	/* This command requires a valid controller index */
4782 	if (!hdev)
4783 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4784 				       MGMT_OP_SET_EXP_FEATURE,
4785 				       MGMT_STATUS_INVALID_INDEX);
4786 
4787 	/* Parameters are limited to a single octet */
4788 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4789 		return mgmt_cmd_status(sk, hdev->id,
4790 				       MGMT_OP_SET_EXP_FEATURE,
4791 				       MGMT_STATUS_INVALID_PARAMS);
4792 
4793 	/* Only boolean on/off is supported */
4794 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4795 		return mgmt_cmd_status(sk, hdev->id,
4796 				       MGMT_OP_SET_EXP_FEATURE,
4797 				       MGMT_STATUS_INVALID_PARAMS);
4798 
4799 	hci_req_sync_lock(hdev);
4800 
4801 	val = !!cp->param[0];
4802 	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4803 
4804 	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4805 		err = mgmt_cmd_status(sk, hdev->id,
4806 				      MGMT_OP_SET_EXP_FEATURE,
4807 				      MGMT_STATUS_NOT_SUPPORTED);
4808 		goto unlock_quality_report;
4809 	}
4810 
4811 	if (changed) {
4812 		if (hdev->set_quality_report)
4813 			err = hdev->set_quality_report(hdev, val);
4814 		else
4815 			err = aosp_set_quality_report(hdev, val);
4816 
4817 		if (err) {
4818 			err = mgmt_cmd_status(sk, hdev->id,
4819 					      MGMT_OP_SET_EXP_FEATURE,
4820 					      MGMT_STATUS_FAILED);
4821 			goto unlock_quality_report;
4822 		}
4823 
4824 		if (val)
4825 			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4826 		else
4827 			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4828 	}
4829 
4830 	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4831 
4832 	memcpy(rp.uuid, quality_report_uuid, 16);
4833 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4834 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4835 
4836 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4837 				&rp, sizeof(rp));
4838 
4839 	if (changed)
4840 		exp_feature_changed(hdev, quality_report_uuid, val, sk);
4841 
4842 unlock_quality_report:
4843 	hci_req_sync_unlock(hdev);
4844 	return err;
4845 }
4846 
4847 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4848 				  struct mgmt_cp_set_exp_feature *cp,
4849 				  u16 data_len)
4850 {
4851 	bool val, changed;
4852 	int err;
4853 	struct mgmt_rp_set_exp_feature rp;
4854 
4855 	/* This command requires a valid controller index */
4856 	if (!hdev)
4857 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4858 				       MGMT_OP_SET_EXP_FEATURE,
4859 				       MGMT_STATUS_INVALID_INDEX);
4860 
4861 	/* Parameters are limited to a single octet */
4862 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4863 		return mgmt_cmd_status(sk, hdev->id,
4864 				       MGMT_OP_SET_EXP_FEATURE,
4865 				       MGMT_STATUS_INVALID_PARAMS);
4866 
4867 	/* Only boolean on/off is supported */
4868 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4869 		return mgmt_cmd_status(sk, hdev->id,
4870 				       MGMT_OP_SET_EXP_FEATURE,
4871 				       MGMT_STATUS_INVALID_PARAMS);
4872 
4873 	val = !!cp->param[0];
4874 	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4875 
4876 	if (!hdev->get_data_path_id) {
4877 		return mgmt_cmd_status(sk, hdev->id,
4878 				       MGMT_OP_SET_EXP_FEATURE,
4879 				       MGMT_STATUS_NOT_SUPPORTED);
4880 	}
4881 
4882 	if (changed) {
4883 		if (val)
4884 			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4885 		else
4886 			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4887 	}
4888 
4889 	bt_dev_info(hdev, "offload codecs enable %d changed %d",
4890 		    val, changed);
4891 
4892 	memcpy(rp.uuid, offload_codecs_uuid, 16);
4893 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4894 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4895 	err = mgmt_cmd_complete(sk, hdev->id,
4896 				MGMT_OP_SET_EXP_FEATURE, 0,
4897 				&rp, sizeof(rp));
4898 
4899 	if (changed)
4900 		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4901 
4902 	return err;
4903 }
4904 
4905 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4906 					  struct mgmt_cp_set_exp_feature *cp,
4907 					  u16 data_len)
4908 {
4909 	bool val, changed;
4910 	int err;
4911 	struct mgmt_rp_set_exp_feature rp;
4912 
4913 	/* This command requires a valid controller index */
4914 	if (!hdev)
4915 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4916 				       MGMT_OP_SET_EXP_FEATURE,
4917 				       MGMT_STATUS_INVALID_INDEX);
4918 
4919 	/* Parameters are limited to a single octet */
4920 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4921 		return mgmt_cmd_status(sk, hdev->id,
4922 				       MGMT_OP_SET_EXP_FEATURE,
4923 				       MGMT_STATUS_INVALID_PARAMS);
4924 
4925 	/* Only boolean on/off is supported */
4926 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4927 		return mgmt_cmd_status(sk, hdev->id,
4928 				       MGMT_OP_SET_EXP_FEATURE,
4929 				       MGMT_STATUS_INVALID_PARAMS);
4930 
4931 	val = !!cp->param[0];
4932 	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4933 
4934 	if (!hci_dev_le_state_simultaneous(hdev)) {
4935 		return mgmt_cmd_status(sk, hdev->id,
4936 				       MGMT_OP_SET_EXP_FEATURE,
4937 				       MGMT_STATUS_NOT_SUPPORTED);
4938 	}
4939 
4940 	if (changed) {
4941 		if (val)
4942 			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4943 		else
4944 			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4945 	}
4946 
4947 	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4948 		    val, changed);
4949 
4950 	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4951 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4952 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4953 	err = mgmt_cmd_complete(sk, hdev->id,
4954 				MGMT_OP_SET_EXP_FEATURE, 0,
4955 				&rp, sizeof(rp));
4956 
4957 	if (changed)
4958 		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4959 
4960 	return err;
4961 }
4962 
4963 #ifdef CONFIG_BT_LE
4964 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4965 			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4966 {
4967 	struct mgmt_rp_set_exp_feature rp;
4968 	bool val, changed = false;
4969 	int err;
4970 
4971 	/* This command requires the non-controller index */
4972 	if (hdev)
4973 		return mgmt_cmd_status(sk, hdev->id,
4974 				       MGMT_OP_SET_EXP_FEATURE,
4975 				       MGMT_STATUS_INVALID_INDEX);
4976 
4977 	/* Parameters are limited to a single octet */
4978 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4979 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4980 				       MGMT_OP_SET_EXP_FEATURE,
4981 				       MGMT_STATUS_INVALID_PARAMS);
4982 
4983 	/* Only boolean on/off is supported */
4984 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4985 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4986 				       MGMT_OP_SET_EXP_FEATURE,
4987 				       MGMT_STATUS_INVALID_PARAMS);
4988 
4989 	val = !!cp->param[0];
4990 	if (val)
4991 		err = iso_init();
4992 	else
4993 		err = iso_exit();
4994 
4995 	if (!err)
4996 		changed = true;
4997 
4998 	memcpy(rp.uuid, iso_socket_uuid, 16);
4999 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
5000 
5001 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
5002 
5003 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
5004 				MGMT_OP_SET_EXP_FEATURE, 0,
5005 				&rp, sizeof(rp));
5006 
5007 	if (changed)
5008 		exp_feature_changed(hdev, iso_socket_uuid, val, sk);
5009 
5010 	return err;
5011 }
5012 #endif
5013 
5014 static const struct mgmt_exp_feature {
5015 	const u8 *uuid;
5016 	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
5017 			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
5018 } exp_features[] = {
5019 	EXP_FEAT(ZERO_KEY, set_zero_key_func),
5020 #ifdef CONFIG_BT_FEATURE_DEBUG
5021 	EXP_FEAT(debug_uuid, set_debug_func),
5022 #endif
5023 	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
5024 	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
5025 	EXP_FEAT(quality_report_uuid, set_quality_report_func),
5026 	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
5027 	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
5028 #ifdef CONFIG_BT_LE
5029 	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
5030 #endif
5031 
5032 	/* end with a null feature */
5033 	EXP_FEAT(NULL, NULL)
5034 };
5035 
5036 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
5037 			   void *data, u16 data_len)
5038 {
5039 	struct mgmt_cp_set_exp_feature *cp = data;
5040 	size_t i = 0;
5041 
5042 	bt_dev_dbg(hdev, "sock %p", sk);
5043 
5044 	for (i = 0; exp_features[i].uuid; i++) {
5045 		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
5046 			return exp_features[i].set_func(sk, hdev, cp, data_len);
5047 	}
5048 
5049 	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
5050 			       MGMT_OP_SET_EXP_FEATURE,
5051 			       MGMT_STATUS_NOT_SUPPORTED);
5052 }
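
/* Registering a new experimental feature only takes a UUID constant and a
 * setter added to exp_features[] before the NULL terminator, e.g. with
 * hypothetical names:
 *
 *	EXP_FEAT(my_feature_uuid, set_my_feature_func),
 *
 * set_exp_feature() then dispatches on a 16 byte memcmp() of cp->uuid.
 */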
5053 
5054 static u32 get_params_flags(struct hci_dev *hdev,
5055 			    struct hci_conn_params *params)
5056 {
5057 	u32 flags = hdev->conn_flags;
5058 
5059 	/* Devices using RPAs can only be programmed into the accept list if
5060 	 * LL Privacy has been enabled; otherwise they cannot be marked with
5061 	 * HCI_CONN_FLAG_REMOTE_WAKEUP.
5062 	 */
5063 	if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
5064 	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
5065 		flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
5066 
5067 	return flags;
5068 }
5069 
5070 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5071 			    u16 data_len)
5072 {
5073 	struct mgmt_cp_get_device_flags *cp = data;
5074 	struct mgmt_rp_get_device_flags rp;
5075 	struct bdaddr_list_with_flags *br_params;
5076 	struct hci_conn_params *params;
5077 	u32 supported_flags;
5078 	u32 current_flags = 0;
5079 	u8 status = MGMT_STATUS_INVALID_PARAMS;
5080 
5081 	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
5082 		   &cp->addr.bdaddr, cp->addr.type);
5083 
5084 	hci_dev_lock(hdev);
5085 
5086 	supported_flags = hdev->conn_flags;
5087 
5088 	memset(&rp, 0, sizeof(rp));
5089 
5090 	if (cp->addr.type == BDADDR_BREDR) {
5091 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5092 							      &cp->addr.bdaddr,
5093 							      cp->addr.type);
5094 		if (!br_params)
5095 			goto done;
5096 
5097 		current_flags = br_params->flags;
5098 	} else {
5099 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5100 						le_addr_type(cp->addr.type));
5101 		if (!params)
5102 			goto done;
5103 
5104 		supported_flags = get_params_flags(hdev, params);
5105 		current_flags = params->flags;
5106 	}
5107 
5108 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5109 	rp.addr.type = cp->addr.type;
5110 	rp.supported_flags = cpu_to_le32(supported_flags);
5111 	rp.current_flags = cpu_to_le32(current_flags);
5112 
5113 	status = MGMT_STATUS_SUCCESS;
5114 
5115 done:
5116 	hci_dev_unlock(hdev);
5117 
5118 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5119 				&rp, sizeof(rp));
5120 }
5121 
5122 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5123 				 bdaddr_t *bdaddr, u8 bdaddr_type,
5124 				 u32 supported_flags, u32 current_flags)
5125 {
5126 	struct mgmt_ev_device_flags_changed ev;
5127 
5128 	bacpy(&ev.addr.bdaddr, bdaddr);
5129 	ev.addr.type = bdaddr_type;
5130 	ev.supported_flags = cpu_to_le32(supported_flags);
5131 	ev.current_flags = cpu_to_le32(current_flags);
5132 
5133 	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5134 }
5135 
5136 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5137 			    u16 len)
5138 {
5139 	struct mgmt_cp_set_device_flags *cp = data;
5140 	struct bdaddr_list_with_flags *br_params;
5141 	struct hci_conn_params *params;
5142 	u8 status = MGMT_STATUS_INVALID_PARAMS;
5143 	u32 supported_flags;
5144 	u32 current_flags = __le32_to_cpu(cp->current_flags);
5145 
5146 	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5147 		   &cp->addr.bdaddr, cp->addr.type, current_flags);
5148 
5149 	/* TODO: conn_flags can change; take hci_dev_lock() before reading it */
5150 	supported_flags = hdev->conn_flags;
5151 
5152 	if ((supported_flags | current_flags) != supported_flags) {
5153 		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5154 			    current_flags, supported_flags);
5155 		goto done;
5156 	}
5157 
5158 	hci_dev_lock(hdev);
5159 
5160 	if (cp->addr.type == BDADDR_BREDR) {
5161 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5162 							      &cp->addr.bdaddr,
5163 							      cp->addr.type);
5164 
5165 		if (br_params) {
5166 			br_params->flags = current_flags;
5167 			status = MGMT_STATUS_SUCCESS;
5168 		} else {
5169 			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5170 				    &cp->addr.bdaddr, cp->addr.type);
5171 		}
5172 
5173 		goto unlock;
5174 	}
5175 
5176 	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5177 					le_addr_type(cp->addr.type));
5178 	if (!params) {
5179 		bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5180 			    &cp->addr.bdaddr, le_addr_type(cp->addr.type));
5181 		goto unlock;
5182 	}
5183 
5184 	supported_flags = get_params_flags(hdev, params);
5185 
5186 	if ((supported_flags | current_flags) != supported_flags) {
5187 		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5188 			    current_flags, supported_flags);
5189 		goto unlock;
5190 	}
5191 
5192 	WRITE_ONCE(params->flags, current_flags);
5193 	status = MGMT_STATUS_SUCCESS;
5194 
5195 	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5196 	 * has been set.
5197 	 */
5198 	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5199 		hci_update_passive_scan(hdev);
5200 
5201 unlock:
5202 	hci_dev_unlock(hdev);
5203 
5204 done:
5205 	if (status == MGMT_STATUS_SUCCESS)
5206 		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5207 				     supported_flags, current_flags);
5208 
5209 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5210 				 &cp->addr, sizeof(cp->addr));
5211 }
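
/* The (supported_flags | current_flags) != supported_flags test above is
 * a plain bitmask check: e.g. supported 0x03 with requested 0x05 gives
 * 0x07 != 0x03, so the unknown 0x04 bit is rejected before the stored
 * flags are touched.
 */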
5212 
5213 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5214 				   u16 handle)
5215 {
5216 	struct mgmt_ev_adv_monitor_added ev;
5217 
5218 	ev.monitor_handle = cpu_to_le16(handle);
5219 
5220 	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5221 }
5222 
5223 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
5224 {
5225 	struct mgmt_ev_adv_monitor_removed ev;
5226 	struct mgmt_pending_cmd *cmd;
5227 	struct sock *sk_skip = NULL;
5228 	struct mgmt_cp_remove_adv_monitor *cp;
5229 
5230 	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
5231 	if (cmd) {
5232 		cp = cmd->param;
5233 
5234 		if (cp->monitor_handle)
5235 			sk_skip = cmd->sk;
5236 	}
5237 
5238 	ev.monitor_handle = cpu_to_le16(handle);
5239 
5240 	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
5241 }
5242 
5243 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5244 				 void *data, u16 len)
5245 {
5246 	struct adv_monitor *monitor = NULL;
5247 	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5248 	int handle, err;
5249 	size_t rp_size = 0;
5250 	__u32 supported = 0;
5251 	__u32 enabled = 0;
5252 	__u16 num_handles = 0;
5253 	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5254 
5255 	BT_DBG("request for %s", hdev->name);
5256 
5257 	hci_dev_lock(hdev);
5258 
5259 	if (msft_monitor_supported(hdev))
5260 		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5261 
5262 	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5263 		handles[num_handles++] = monitor->handle;
5264 
5265 	hci_dev_unlock(hdev);
5266 
5267 	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5268 	rp = kmalloc(rp_size, GFP_KERNEL);
5269 	if (!rp)
5270 		return -ENOMEM;
5271 
5272 	/* All supported features are currently enabled */
5273 	enabled = supported;
5274 
5275 	rp->supported_features = cpu_to_le32(supported);
5276 	rp->enabled_features = cpu_to_le32(enabled);
5277 	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5278 	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5279 	rp->num_handles = cpu_to_le16(num_handles);
5280 	if (num_handles)
5281 		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5282 
5283 	err = mgmt_cmd_complete(sk, hdev->id,
5284 				MGMT_OP_READ_ADV_MONITOR_FEATURES,
5285 				MGMT_STATUS_SUCCESS, rp, rp_size);
5286 
5287 	kfree(rp);
5288 
5289 	return err;
5290 }
5291 
5292 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5293 						   void *data, int status)
5294 {
5295 	struct mgmt_rp_add_adv_patterns_monitor rp;
5296 	struct mgmt_pending_cmd *cmd = data;
5297 	struct adv_monitor *monitor = cmd->user_data;
5298 
5299 	hci_dev_lock(hdev);
5300 
5301 	rp.monitor_handle = cpu_to_le16(monitor->handle);
5302 
5303 	if (!status) {
5304 		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5305 		hdev->adv_monitors_cnt++;
5306 		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5307 			monitor->state = ADV_MONITOR_STATE_REGISTERED;
5308 		hci_update_passive_scan(hdev);
5309 	}
5310 
5311 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5312 			  mgmt_status(status), &rp, sizeof(rp));
5313 	mgmt_pending_remove(cmd);
5314 
5315 	hci_dev_unlock(hdev);
5316 	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5317 		   rp.monitor_handle, status);
5318 }
5319 
5320 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5321 {
5322 	struct mgmt_pending_cmd *cmd = data;
5323 	struct adv_monitor *monitor = cmd->user_data;
5324 
5325 	return hci_add_adv_monitor(hdev, monitor);
5326 }
5327 
5328 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5329 				      struct adv_monitor *m, u8 status,
5330 				      void *data, u16 len, u16 op)
5331 {
5332 	struct mgmt_pending_cmd *cmd;
5333 	int err;
5334 
5335 	hci_dev_lock(hdev);
5336 
5337 	if (status)
5338 		goto unlock;
5339 
5340 	if (pending_find(MGMT_OP_SET_LE, hdev) ||
5341 	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5342 	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
5343 	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
5344 		status = MGMT_STATUS_BUSY;
5345 		goto unlock;
5346 	}
5347 
5348 	cmd = mgmt_pending_add(sk, op, hdev, data, len);
5349 	if (!cmd) {
5350 		status = MGMT_STATUS_NO_RESOURCES;
5351 		goto unlock;
5352 	}
5353 
5354 	cmd->user_data = m;
5355 	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5356 				 mgmt_add_adv_patterns_monitor_complete);
5357 	if (err) {
5358 		if (err == -ENOMEM)
5359 			status = MGMT_STATUS_NO_RESOURCES;
5360 		else
5361 			status = MGMT_STATUS_FAILED;
5362 
5363 		goto unlock;
5364 	}
5365 
5366 	hci_dev_unlock(hdev);
5367 
5368 	return 0;
5369 
5370 unlock:
5371 	hci_free_adv_monitor(hdev, m);
5372 	hci_dev_unlock(hdev);
5373 	return mgmt_cmd_status(sk, hdev->id, op, status);
5374 }
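
/* Ownership note: on every failure path __add_adv_patterns_monitor()
 * frees the monitor via hci_free_adv_monitor(), so the callers below can
 * hand over a NULL or partially constructed monitor together with a
 * non-zero status and rely on this helper for cleanup.
 */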
5375 
5376 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5377 				   struct mgmt_adv_rssi_thresholds *rssi)
5378 {
5379 	if (rssi) {
5380 		m->rssi.low_threshold = rssi->low_threshold;
5381 		m->rssi.low_threshold_timeout =
5382 		    __le16_to_cpu(rssi->low_threshold_timeout);
5383 		m->rssi.high_threshold = rssi->high_threshold;
5384 		m->rssi.high_threshold_timeout =
5385 		    __le16_to_cpu(rssi->high_threshold_timeout);
5386 		m->rssi.sampling_period = rssi->sampling_period;
5387 	} else {
5388 		/* Default values. These numbers are the least constricting
5389 		 * parameters for the MSFT API to work, so it behaves as if
5390 		 * there are no RSSI parameters to consider. They may need to
5391 		 * be changed if other APIs are to be supported.
5392 		 */
5393 		m->rssi.low_threshold = -127;
5394 		m->rssi.low_threshold_timeout = 60;
5395 		m->rssi.high_threshold = -127;
5396 		m->rssi.high_threshold_timeout = 0;
5397 		m->rssi.sampling_period = 0;
5398 	}
5399 }
5400 
5401 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5402 				    struct mgmt_adv_pattern *patterns)
5403 {
5404 	u8 offset = 0, length = 0;
5405 	struct adv_pattern *p = NULL;
5406 	int i;
5407 
5408 	for (i = 0; i < pattern_count; i++) {
5409 		offset = patterns[i].offset;
5410 		length = patterns[i].length;
5411 		if (offset >= HCI_MAX_EXT_AD_LENGTH ||
5412 		    length > HCI_MAX_EXT_AD_LENGTH ||
5413 		    (offset + length) > HCI_MAX_EXT_AD_LENGTH)
5414 			return MGMT_STATUS_INVALID_PARAMS;
5415 
5416 		p = kmalloc(sizeof(*p), GFP_KERNEL);
5417 		if (!p)
5418 			return MGMT_STATUS_NO_RESOURCES;
5419 
5420 		p->ad_type = patterns[i].ad_type;
5421 		p->offset = patterns[i].offset;
5422 		p->length = patterns[i].length;
5423 		memcpy(p->value, patterns[i].value, p->length);
5424 
5425 		INIT_LIST_HEAD(&p->list);
5426 		list_add(&p->list, &m->patterns);
5427 	}
5428 
5429 	return MGMT_STATUS_SUCCESS;
5430 }
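
/* Pattern bounds: offset and length must each fit within extended
 * advertising data and so must their sum, e.g. offset 250 with length 10
 * fails the (offset + length) > HCI_MAX_EXT_AD_LENGTH check (251 bytes)
 * even though both values are individually in range.
 */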
5431 
5432 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5433 				    void *data, u16 len)
5434 {
5435 	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5436 	struct adv_monitor *m = NULL;
5437 	u8 status = MGMT_STATUS_SUCCESS;
5438 	size_t expected_size = sizeof(*cp);
5439 
5440 	BT_DBG("request for %s", hdev->name);
5441 
5442 	if (len <= sizeof(*cp)) {
5443 		status = MGMT_STATUS_INVALID_PARAMS;
5444 		goto done;
5445 	}
5446 
5447 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5448 	if (len != expected_size) {
5449 		status = MGMT_STATUS_INVALID_PARAMS;
5450 		goto done;
5451 	}
5452 
5453 	m = kzalloc(sizeof(*m), GFP_KERNEL);
5454 	if (!m) {
5455 		status = MGMT_STATUS_NO_RESOURCES;
5456 		goto done;
5457 	}
5458 
5459 	INIT_LIST_HEAD(&m->patterns);
5460 
5461 	parse_adv_monitor_rssi(m, NULL);
5462 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5463 
5464 done:
5465 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5466 					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5467 }
5468 
5469 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5470 					 void *data, u16 len)
5471 {
5472 	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5473 	struct adv_monitor *m = NULL;
5474 	u8 status = MGMT_STATUS_SUCCESS;
5475 	size_t expected_size = sizeof(*cp);
5476 
5477 	BT_DBG("request for %s", hdev->name);
5478 
5479 	if (len <= sizeof(*cp)) {
5480 		status = MGMT_STATUS_INVALID_PARAMS;
5481 		goto done;
5482 	}
5483 
5484 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5485 	if (len != expected_size) {
5486 		status = MGMT_STATUS_INVALID_PARAMS;
5487 		goto done;
5488 	}
5489 
5490 	m = kzalloc(sizeof(*m), GFP_KERNEL);
5491 	if (!m) {
5492 		status = MGMT_STATUS_NO_RESOURCES;
5493 		goto done;
5494 	}
5495 
5496 	INIT_LIST_HEAD(&m->patterns);
5497 
5498 	parse_adv_monitor_rssi(m, &cp->rssi);
5499 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5500 
5501 done:
5502 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5503 					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5504 }
5505 
5506 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5507 					     void *data, int status)
5508 {
5509 	struct mgmt_rp_remove_adv_monitor rp;
5510 	struct mgmt_pending_cmd *cmd = data;
5511 	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5512 
5513 	hci_dev_lock(hdev);
5514 
5515 	rp.monitor_handle = cp->monitor_handle;
5516 
5517 	if (!status)
5518 		hci_update_passive_scan(hdev);
5519 
5520 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5521 			  mgmt_status(status), &rp, sizeof(rp));
5522 	mgmt_pending_remove(cmd);
5523 
5524 	hci_dev_unlock(hdev);
5525 	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5526 		   rp.monitor_handle, status);
5527 }
5528 
5529 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5530 {
5531 	struct mgmt_pending_cmd *cmd = data;
5532 	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5533 	u16 handle = __le16_to_cpu(cp->monitor_handle);
5534 
5535 	if (!handle)
5536 		return hci_remove_all_adv_monitor(hdev);
5537 
5538 	return hci_remove_single_adv_monitor(hdev, handle);
5539 }
5540 
5541 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5542 			      void *data, u16 len)
5543 {
5544 	struct mgmt_pending_cmd *cmd;
5545 	int err, status;
5546 
5547 	hci_dev_lock(hdev);
5548 
5549 	if (pending_find(MGMT_OP_SET_LE, hdev) ||
5550 	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5551 	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5552 	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5553 		status = MGMT_STATUS_BUSY;
5554 		goto unlock;
5555 	}
5556 
5557 	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5558 	if (!cmd) {
5559 		status = MGMT_STATUS_NO_RESOURCES;
5560 		goto unlock;
5561 	}
5562 
5563 	err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
5564 				  mgmt_remove_adv_monitor_complete);
5565 
5566 	if (err) {
5567 		mgmt_pending_remove(cmd);
5568 
5569 		if (err == -ENOMEM)
5570 			status = MGMT_STATUS_NO_RESOURCES;
5571 		else
5572 			status = MGMT_STATUS_FAILED;
5573 
5574 		goto unlock;
5575 	}
5576 
5577 	hci_dev_unlock(hdev);
5578 
5579 	return 0;
5580 
5581 unlock:
5582 	hci_dev_unlock(hdev);
5583 	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5584 			       status);
5585 }
5586 
5587 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5588 {
5589 	struct mgmt_rp_read_local_oob_data mgmt_rp;
5590 	size_t rp_size = sizeof(mgmt_rp);
5591 	struct mgmt_pending_cmd *cmd = data;
5592 	struct sk_buff *skb = cmd->skb;
5593 	u8 status = mgmt_status(err);
5594 
5595 	if (!status) {
5596 		if (!skb)
5597 			status = MGMT_STATUS_FAILED;
5598 		else if (IS_ERR(skb))
5599 			status = mgmt_status(PTR_ERR(skb));
5600 		else
5601 			status = mgmt_status(skb->data[0]);
5602 	}
5603 
5604 	bt_dev_dbg(hdev, "status %d", status);
5605 
5606 	if (status) {
5607 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5608 		goto remove;
5609 	}
5610 
5611 	memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5612 
5613 	if (!bredr_sc_enabled(hdev)) {
5614 		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5615 
5616 		if (skb->len < sizeof(*rp)) {
5617 			mgmt_cmd_status(cmd->sk, hdev->id,
5618 					MGMT_OP_READ_LOCAL_OOB_DATA,
5619 					MGMT_STATUS_FAILED);
5620 			goto remove;
5621 		}
5622 
5623 		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5624 		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5625 
5626 		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5627 	} else {
5628 		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5629 
5630 		if (skb->len < sizeof(*rp)) {
5631 			mgmt_cmd_status(cmd->sk, hdev->id,
5632 					MGMT_OP_READ_LOCAL_OOB_DATA,
5633 					MGMT_STATUS_FAILED);
5634 			goto remove;
5635 		}
5636 
5637 		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5638 		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5639 
5640 		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5641 		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5642 	}
5643 
5644 	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5645 			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5646 
5647 remove:
5648 	if (skb && !IS_ERR(skb))
5649 		kfree_skb(skb);
5650 
5651 	mgmt_pending_free(cmd);
5652 }
5653 
5654 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5655 {
5656 	struct mgmt_pending_cmd *cmd = data;
5657 
5658 	if (bredr_sc_enabled(hdev))
5659 		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5660 	else
5661 		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5662 
5663 	if (IS_ERR(cmd->skb))
5664 		return PTR_ERR(cmd->skb);
5665 	else
5666 		return 0;
5667 }
5668 
5669 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5670 			       void *data, u16 data_len)
5671 {
5672 	struct mgmt_pending_cmd *cmd;
5673 	int err;
5674 
5675 	bt_dev_dbg(hdev, "sock %p", sk);
5676 
5677 	hci_dev_lock(hdev);
5678 
5679 	if (!hdev_is_powered(hdev)) {
5680 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5681 				      MGMT_STATUS_NOT_POWERED);
5682 		goto unlock;
5683 	}
5684 
5685 	if (!lmp_ssp_capable(hdev)) {
5686 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5687 				      MGMT_STATUS_NOT_SUPPORTED);
5688 		goto unlock;
5689 	}
5690 
5691 	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5692 	if (!cmd)
5693 		err = -ENOMEM;
5694 	else
5695 		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5696 					 read_local_oob_data_complete);
5697 
5698 	if (err < 0) {
5699 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5700 				      MGMT_STATUS_FAILED);
5701 
5702 		if (cmd)
5703 			mgmt_pending_free(cmd);
5704 	}
5705 
5706 unlock:
5707 	hci_dev_unlock(hdev);
5708 	return err;
5709 }
5710 
5711 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5712 			       void *data, u16 len)
5713 {
5714 	struct mgmt_addr_info *addr = data;
5715 	int err;
5716 
5717 	bt_dev_dbg(hdev, "sock %p", sk);
5718 
5719 	if (!bdaddr_type_is_valid(addr->type))
5720 		return mgmt_cmd_complete(sk, hdev->id,
5721 					 MGMT_OP_ADD_REMOTE_OOB_DATA,
5722 					 MGMT_STATUS_INVALID_PARAMS,
5723 					 addr, sizeof(*addr));
5724 
5725 	hci_dev_lock(hdev);
5726 
5727 	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5728 		struct mgmt_cp_add_remote_oob_data *cp = data;
5729 		u8 status;
5730 
5731 		if (cp->addr.type != BDADDR_BREDR) {
5732 			err = mgmt_cmd_complete(sk, hdev->id,
5733 						MGMT_OP_ADD_REMOTE_OOB_DATA,
5734 						MGMT_STATUS_INVALID_PARAMS,
5735 						&cp->addr, sizeof(cp->addr));
5736 			goto unlock;
5737 		}
5738 
5739 		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5740 					      cp->addr.type, cp->hash,
5741 					      cp->rand, NULL, NULL);
5742 		if (err < 0)
5743 			status = MGMT_STATUS_FAILED;
5744 		else
5745 			status = MGMT_STATUS_SUCCESS;
5746 
5747 		err = mgmt_cmd_complete(sk, hdev->id,
5748 					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5749 					&cp->addr, sizeof(cp->addr));
5750 	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5751 		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5752 		u8 *rand192, *hash192, *rand256, *hash256;
5753 		u8 status;
5754 
5755 		if (bdaddr_type_is_le(cp->addr.type)) {
5756 			/* Enforce zero-valued 192-bit parameters as
5757 			 * long as legacy SMP OOB isn't implemented.
5758 			 */
5759 			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5760 			    memcmp(cp->hash192, ZERO_KEY, 16)) {
5761 				err = mgmt_cmd_complete(sk, hdev->id,
5762 							MGMT_OP_ADD_REMOTE_OOB_DATA,
5763 							MGMT_STATUS_INVALID_PARAMS,
5764 							addr, sizeof(*addr));
5765 				goto unlock;
5766 			}
5767 
5768 			rand192 = NULL;
5769 			hash192 = NULL;
5770 		} else {
5771 			/* If one of the P-192 values is set to zero,
5772 			 * just disable OOB data for P-192.
5773 			 */
5774 			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5775 			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
5776 				rand192 = NULL;
5777 				hash192 = NULL;
5778 			} else {
5779 				rand192 = cp->rand192;
5780 				hash192 = cp->hash192;
5781 			}
5782 		}
5783 
5784 		/* If one of the P-256 values is set to zero, just disable
5785 		 * OOB data for P-256.
5786 		 */
5787 		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5788 		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
5789 			rand256 = NULL;
5790 			hash256 = NULL;
5791 		} else {
5792 			rand256 = cp->rand256;
5793 			hash256 = cp->hash256;
5794 		}
5795 
5796 		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5797 					      cp->addr.type, hash192, rand192,
5798 					      hash256, rand256);
5799 		if (err < 0)
5800 			status = MGMT_STATUS_FAILED;
5801 		else
5802 			status = MGMT_STATUS_SUCCESS;
5803 
5804 		err = mgmt_cmd_complete(sk, hdev->id,
5805 					MGMT_OP_ADD_REMOTE_OOB_DATA,
5806 					status, &cp->addr, sizeof(cp->addr));
5807 	} else {
5808 		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5809 			   len);
5810 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5811 				      MGMT_STATUS_INVALID_PARAMS);
5812 	}
5813 
5814 unlock:
5815 	hci_dev_unlock(hdev);
5816 	return err;
5817 }
5818 
5819 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5820 				  void *data, u16 len)
5821 {
5822 	struct mgmt_cp_remove_remote_oob_data *cp = data;
5823 	u8 status;
5824 	int err;
5825 
5826 	bt_dev_dbg(hdev, "sock %p", sk);
5827 
5828 	if (cp->addr.type != BDADDR_BREDR)
5829 		return mgmt_cmd_complete(sk, hdev->id,
5830 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5831 					 MGMT_STATUS_INVALID_PARAMS,
5832 					 &cp->addr, sizeof(cp->addr));
5833 
5834 	hci_dev_lock(hdev);
5835 
5836 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5837 		hci_remote_oob_data_clear(hdev);
5838 		status = MGMT_STATUS_SUCCESS;
5839 		goto done;
5840 	}
5841 
5842 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5843 	if (err < 0)
5844 		status = MGMT_STATUS_INVALID_PARAMS;
5845 	else
5846 		status = MGMT_STATUS_SUCCESS;
5847 
5848 done:
5849 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5850 				status, &cp->addr, sizeof(cp->addr));
5851 
5852 	hci_dev_unlock(hdev);
5853 	return err;
5854 }
5855 
5856 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5857 {
5858 	struct mgmt_pending_cmd *cmd;
5859 
5860 	bt_dev_dbg(hdev, "status %u", status);
5861 
5862 	hci_dev_lock(hdev);
5863 
5864 	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5865 	if (!cmd)
5866 		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5867 
5868 	if (!cmd)
5869 		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5870 
5871 	if (cmd) {
5872 		cmd->cmd_complete(cmd, mgmt_status(status));
5873 		mgmt_pending_remove(cmd);
5874 	}
5875 
5876 	hci_dev_unlock(hdev);
5877 }
5878 
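/* Check whether the requested discovery type is usable with the
 * current controller configuration. On failure *mgmt_status is set to
 * the status code that should be returned to user space.
 */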
5879 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5880 				    uint8_t *mgmt_status)
5881 {
5882 	switch (type) {
5883 	case DISCOV_TYPE_LE:
5884 		*mgmt_status = mgmt_le_support(hdev);
5885 		if (*mgmt_status)
5886 			return false;
5887 		break;
5888 	case DISCOV_TYPE_INTERLEAVED:
5889 		*mgmt_status = mgmt_le_support(hdev);
5890 		if (*mgmt_status)
5891 			return false;
5892 		fallthrough;
5893 	case DISCOV_TYPE_BREDR:
5894 		*mgmt_status = mgmt_bredr_support(hdev);
5895 		if (*mgmt_status)
5896 			return false;
5897 		break;
5898 	default:
5899 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5900 		return false;
5901 	}
5902 
5903 	return true;
5904 }
5905 
5906 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5907 {
5908 	struct mgmt_pending_cmd *cmd = data;
5909 
5910 	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5911 	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5912 	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5913 		return;
5914 
5915 	bt_dev_dbg(hdev, "err %d", err);
5916 
5917 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5918 			  cmd->param, 1);
5919 	mgmt_pending_remove(cmd);
5920 
5921 	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
5922 				DISCOVERY_FINDING);
5923 }
5924 
5925 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5926 {
5927 	return hci_start_discovery_sync(hdev);
5928 }
5929 
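/* Common implementation for Start Discovery and Start Limited
 * Discovery. The request is rejected while powered off, while another
 * discovery is active or paused, and when the discovery type is not
 * supported by the controller.
 */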
5930 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5931 				    u16 op, void *data, u16 len)
5932 {
5933 	struct mgmt_cp_start_discovery *cp = data;
5934 	struct mgmt_pending_cmd *cmd;
5935 	u8 status;
5936 	int err;
5937 
5938 	bt_dev_dbg(hdev, "sock %p", sk);
5939 
5940 	hci_dev_lock(hdev);
5941 
5942 	if (!hdev_is_powered(hdev)) {
5943 		err = mgmt_cmd_complete(sk, hdev->id, op,
5944 					MGMT_STATUS_NOT_POWERED,
5945 					&cp->type, sizeof(cp->type));
5946 		goto failed;
5947 	}
5948 
5949 	if (hdev->discovery.state != DISCOVERY_STOPPED ||
5950 	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5951 		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5952 					&cp->type, sizeof(cp->type));
5953 		goto failed;
5954 	}
5955 
5956 	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5957 		err = mgmt_cmd_complete(sk, hdev->id, op, status,
5958 					&cp->type, sizeof(cp->type));
5959 		goto failed;
5960 	}
5961 
5962 	/* Can't start discovery when it is paused */
5963 	if (hdev->discovery_paused) {
5964 		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5965 					&cp->type, sizeof(cp->type));
5966 		goto failed;
5967 	}
5968 
5969 	/* Clear the discovery filter first to free any previously
5970 	 * allocated memory for the UUID list.
5971 	 */
5972 	hci_discovery_filter_clear(hdev);
5973 
5974 	hdev->discovery.type = cp->type;
5975 	hdev->discovery.report_invalid_rssi = false;
5976 	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5977 		hdev->discovery.limited = true;
5978 	else
5979 		hdev->discovery.limited = false;
5980 
5981 	cmd = mgmt_pending_add(sk, op, hdev, data, len);
5982 	if (!cmd) {
5983 		err = -ENOMEM;
5984 		goto failed;
5985 	}
5986 
5987 	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5988 				 start_discovery_complete);
5989 	if (err < 0) {
5990 		mgmt_pending_remove(cmd);
5991 		goto failed;
5992 	}
5993 
5994 	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5995 
5996 failed:
5997 	hci_dev_unlock(hdev);
5998 	return err;
5999 }
6000 
6001 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
6002 			   void *data, u16 len)
6003 {
6004 	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
6005 					data, len);
6006 }
6007 
6008 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
6009 				   void *data, u16 len)
6010 {
6011 	return start_discovery_internal(sk, hdev,
6012 					MGMT_OP_START_LIMITED_DISCOVERY,
6013 					data, len);
6014 }
6015 
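/* Start Service Discovery is Start Discovery plus result filtering:
 * the variable-length command carries an RSSI threshold and a list of
 * 128-bit (16-byte) UUIDs, which is bounds checked against the actual
 * command length before being copied for filtering.
 */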
6016 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
6017 				   void *data, u16 len)
6018 {
6019 	struct mgmt_cp_start_service_discovery *cp = data;
6020 	struct mgmt_pending_cmd *cmd;
6021 	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
6022 	u16 uuid_count, expected_len;
6023 	u8 status;
6024 	int err;
6025 
6026 	bt_dev_dbg(hdev, "sock %p", sk);
6027 
6028 	hci_dev_lock(hdev);
6029 
6030 	if (!hdev_is_powered(hdev)) {
6031 		err = mgmt_cmd_complete(sk, hdev->id,
6032 					MGMT_OP_START_SERVICE_DISCOVERY,
6033 					MGMT_STATUS_NOT_POWERED,
6034 					&cp->type, sizeof(cp->type));
6035 		goto failed;
6036 	}
6037 
6038 	if (hdev->discovery.state != DISCOVERY_STOPPED ||
6039 	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
6040 		err = mgmt_cmd_complete(sk, hdev->id,
6041 					MGMT_OP_START_SERVICE_DISCOVERY,
6042 					MGMT_STATUS_BUSY, &cp->type,
6043 					sizeof(cp->type));
6044 		goto failed;
6045 	}
6046 
6047 	if (hdev->discovery_paused) {
6048 		err = mgmt_cmd_complete(sk, hdev->id,
6049 					MGMT_OP_START_SERVICE_DISCOVERY,
6050 					MGMT_STATUS_BUSY, &cp->type,
6051 					sizeof(cp->type));
6052 		goto failed;
6053 	}
6054 
6055 	uuid_count = __le16_to_cpu(cp->uuid_count);
6056 	if (uuid_count > max_uuid_count) {
6057 		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
6058 			   uuid_count);
6059 		err = mgmt_cmd_complete(sk, hdev->id,
6060 					MGMT_OP_START_SERVICE_DISCOVERY,
6061 					MGMT_STATUS_INVALID_PARAMS, &cp->type,
6062 					sizeof(cp->type));
6063 		goto failed;
6064 	}
6065 
6066 	expected_len = sizeof(*cp) + uuid_count * 16;
6067 	if (expected_len != len) {
6068 		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
6069 			   expected_len, len);
6070 		err = mgmt_cmd_complete(sk, hdev->id,
6071 					MGMT_OP_START_SERVICE_DISCOVERY,
6072 					MGMT_STATUS_INVALID_PARAMS, &cp->type,
6073 					sizeof(cp->type));
6074 		goto failed;
6075 	}
6076 
6077 	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
6078 		err = mgmt_cmd_complete(sk, hdev->id,
6079 					MGMT_OP_START_SERVICE_DISCOVERY,
6080 					status, &cp->type, sizeof(cp->type));
6081 		goto failed;
6082 	}
6083 
6084 	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
6085 			       hdev, data, len);
6086 	if (!cmd) {
6087 		err = -ENOMEM;
6088 		goto failed;
6089 	}
6090 
6091 	/* Clear the discovery filter first to free any previously
6092 	 * allocated memory for the UUID list.
6093 	 */
6094 	hci_discovery_filter_clear(hdev);
6095 
6096 	hdev->discovery.result_filtering = true;
6097 	hdev->discovery.type = cp->type;
6098 	hdev->discovery.rssi = cp->rssi;
6099 	hdev->discovery.uuid_count = uuid_count;
6100 
6101 	if (uuid_count > 0) {
6102 		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
6103 						GFP_KERNEL);
6104 		if (!hdev->discovery.uuids) {
6105 			err = mgmt_cmd_complete(sk, hdev->id,
6106 						MGMT_OP_START_SERVICE_DISCOVERY,
6107 						MGMT_STATUS_FAILED,
6108 						&cp->type, sizeof(cp->type));
6109 			mgmt_pending_remove(cmd);
6110 			goto failed;
6111 		}
6112 	}
6113 
6114 	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6115 				 start_discovery_complete);
6116 	if (err < 0) {
6117 		mgmt_pending_remove(cmd);
6118 		goto failed;
6119 	}
6120 
6121 	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6122 
6123 failed:
6124 	hci_dev_unlock(hdev);
6125 	return err;
6126 }
6127 
6128 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6129 {
6130 	struct mgmt_pending_cmd *cmd;
6131 
6132 	bt_dev_dbg(hdev, "status %u", status);
6133 
6134 	hci_dev_lock(hdev);
6135 
6136 	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6137 	if (cmd) {
6138 		cmd->cmd_complete(cmd, mgmt_status(status));
6139 		mgmt_pending_remove(cmd);
6140 	}
6141 
6142 	hci_dev_unlock(hdev);
6143 }
6144 
6145 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6146 {
6147 	struct mgmt_pending_cmd *cmd = data;
6148 
6149 	if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6150 		return;
6151 
6152 	bt_dev_dbg(hdev, "err %d", err);
6153 
6154 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
6155 			  cmd->param, 1);
6156 	mgmt_pending_remove(cmd);
6157 
6158 	if (!err)
6159 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6160 }
6161 
6162 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6163 {
6164 	return hci_stop_discovery_sync(hdev);
6165 }
6166 
6167 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6168 			  u16 len)
6169 {
6170 	struct mgmt_cp_stop_discovery *mgmt_cp = data;
6171 	struct mgmt_pending_cmd *cmd;
6172 	int err;
6173 
6174 	bt_dev_dbg(hdev, "sock %p", sk);
6175 
6176 	hci_dev_lock(hdev);
6177 
6178 	if (!hci_discovery_active(hdev)) {
6179 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6180 					MGMT_STATUS_REJECTED, &mgmt_cp->type,
6181 					sizeof(mgmt_cp->type));
6182 		goto unlock;
6183 	}
6184 
6185 	if (hdev->discovery.type != mgmt_cp->type) {
6186 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6187 					MGMT_STATUS_INVALID_PARAMS,
6188 					&mgmt_cp->type, sizeof(mgmt_cp->type));
6189 		goto unlock;
6190 	}
6191 
6192 	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6193 	if (!cmd) {
6194 		err = -ENOMEM;
6195 		goto unlock;
6196 	}
6197 
6198 	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6199 				 stop_discovery_complete);
6200 	if (err < 0) {
6201 		mgmt_pending_remove(cmd);
6202 		goto unlock;
6203 	}
6204 
6205 	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6206 
6207 unlock:
6208 	hci_dev_unlock(hdev);
6209 	return err;
6210 }
6211 
6212 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6213 			u16 len)
6214 {
6215 	struct mgmt_cp_confirm_name *cp = data;
6216 	struct inquiry_entry *e;
6217 	int err;
6218 
6219 	bt_dev_dbg(hdev, "sock %p", sk);
6220 
6221 	hci_dev_lock(hdev);
6222 
6223 	if (!hci_discovery_active(hdev)) {
6224 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6225 					MGMT_STATUS_FAILED, &cp->addr,
6226 					sizeof(cp->addr));
6227 		goto failed;
6228 	}
6229 
6230 	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6231 	if (!e) {
6232 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6233 					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6234 					sizeof(cp->addr));
6235 		goto failed;
6236 	}
6237 
6238 	if (cp->name_known) {
6239 		e->name_state = NAME_KNOWN;
6240 		list_del(&e->list);
6241 	} else {
6242 		e->name_state = NAME_NEEDED;
6243 		hci_inquiry_cache_update_resolve(hdev, e);
6244 	}
6245 
6246 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6247 				&cp->addr, sizeof(cp->addr));
6248 
6249 failed:
6250 	hci_dev_unlock(hdev);
6251 	return err;
6252 }
6253 
6254 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6255 			u16 len)
6256 {
6257 	struct mgmt_cp_block_device *cp = data;
6258 	u8 status;
6259 	int err;
6260 
6261 	bt_dev_dbg(hdev, "sock %p", sk);
6262 
6263 	if (!bdaddr_type_is_valid(cp->addr.type))
6264 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6265 					 MGMT_STATUS_INVALID_PARAMS,
6266 					 &cp->addr, sizeof(cp->addr));
6267 
6268 	hci_dev_lock(hdev);
6269 
6270 	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6271 				  cp->addr.type);
6272 	if (err < 0) {
6273 		status = MGMT_STATUS_FAILED;
6274 		goto done;
6275 	}
6276 
6277 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6278 		   sk);
6279 	status = MGMT_STATUS_SUCCESS;
6280 
6281 done:
6282 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6283 				&cp->addr, sizeof(cp->addr));
6284 
6285 	hci_dev_unlock(hdev);
6286 
6287 	return err;
6288 }
6289 
6290 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6291 			  u16 len)
6292 {
6293 	struct mgmt_cp_unblock_device *cp = data;
6294 	u8 status;
6295 	int err;
6296 
6297 	bt_dev_dbg(hdev, "sock %p", sk);
6298 
6299 	if (!bdaddr_type_is_valid(cp->addr.type))
6300 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6301 					 MGMT_STATUS_INVALID_PARAMS,
6302 					 &cp->addr, sizeof(cp->addr));
6303 
6304 	hci_dev_lock(hdev);
6305 
6306 	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6307 				  cp->addr.type);
6308 	if (err < 0) {
6309 		status = MGMT_STATUS_INVALID_PARAMS;
6310 		goto done;
6311 	}
6312 
6313 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6314 		   sk);
6315 	status = MGMT_STATUS_SUCCESS;
6316 
6317 done:
6318 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6319 				&cp->addr, sizeof(cp->addr));
6320 
6321 	hci_dev_unlock(hdev);
6322 
6323 	return err;
6324 }
6325 
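/* Set Device ID stores the DI record values (source, vendor, product
 * and version) and queues set_device_id_sync() below to regenerate
 * the extended inquiry response data.
 */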
6326 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6327 {
6328 	return hci_update_eir_sync(hdev);
6329 }
6330 
6331 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6332 			 u16 len)
6333 {
6334 	struct mgmt_cp_set_device_id *cp = data;
6335 	int err;
6336 	__u16 source;
6337 
6338 	bt_dev_dbg(hdev, "sock %p", sk);
6339 
6340 	source = __le16_to_cpu(cp->source);
6341 
6342 	if (source > 0x0002)
6343 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6344 				       MGMT_STATUS_INVALID_PARAMS);
6345 
6346 	hci_dev_lock(hdev);
6347 
6348 	hdev->devid_source = source;
6349 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6350 	hdev->devid_product = __le16_to_cpu(cp->product);
6351 	hdev->devid_version = __le16_to_cpu(cp->version);
6352 
6353 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6354 				NULL, 0);
6355 
6356 	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6357 
6358 	hci_dev_unlock(hdev);
6359 
6360 	return err;
6361 }
6362 
6363 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6364 {
6365 	if (err)
6366 		bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6367 	else
6368 		bt_dev_dbg(hdev, "status %d", err);
6369 }
6370 
6371 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6372 {
6373 	struct cmd_lookup match = { NULL, hdev };
6374 	u8 instance;
6375 	struct adv_info *adv_instance;
6376 	u8 status = mgmt_status(err);
6377 
6378 	if (status) {
6379 		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
6380 				     cmd_status_rsp, &status);
6381 		return;
6382 	}
6383 
6384 	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6385 		hci_dev_set_flag(hdev, HCI_ADVERTISING);
6386 	else
6387 		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6388 
6389 	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
6390 			     &match);
6391 
6392 	new_settings(hdev, match.sk);
6393 
6394 	if (match.sk)
6395 		sock_put(match.sk);
6396 
6397 	/* If "Set Advertising" was just disabled and instance advertising was
6398 	 * set up earlier, then re-enable multi-instance advertising.
6399 	 */
6400 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6401 	    list_empty(&hdev->adv_instances))
6402 		return;
6403 
6404 	instance = hdev->cur_adv_instance;
6405 	if (!instance) {
6406 		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6407 							struct adv_info, list);
6408 		if (!adv_instance)
6409 			return;
6410 
6411 		instance = adv_instance->instance;
6412 	}
6413 
6414 	err = hci_schedule_adv_instance_sync(hdev, instance, true);
6415 
6416 	enable_advertising_instance(hdev, err);
6417 }
6418 
6419 static int set_adv_sync(struct hci_dev *hdev, void *data)
6420 {
6421 	struct mgmt_pending_cmd *cmd = data;
6422 	struct mgmt_mode *cp = cmd->param;
6423 	u8 val = !!cp->val;
6424 
6425 	if (cp->val == 0x02)
6426 		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6427 	else
6428 		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6429 
6430 	cancel_adv_timeout(hdev);
6431 
6432 	if (val) {
6433 		/* Switch to instance "0" for the Set Advertising setting.
6434 		 * We cannot use update_[adv|scan_rsp]_data() here as the
6435 		 * HCI_ADVERTISING flag is not yet set.
6436 		 */
6437 		hdev->cur_adv_instance = 0x00;
6438 
6439 		if (ext_adv_capable(hdev)) {
6440 			hci_start_ext_adv_sync(hdev, 0x00);
6441 		} else {
6442 			hci_update_adv_data_sync(hdev, 0x00);
6443 			hci_update_scan_rsp_data_sync(hdev, 0x00);
6444 			hci_enable_advertising_sync(hdev);
6445 		}
6446 	} else {
6447 		hci_disable_advertising_sync(hdev);
6448 	}
6449 
6450 	return 0;
6451 }
6452 
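/* Set Advertising handler: 0x00 disables advertising, 0x01 enables it
 * and 0x02 enables it in connectable mode. When no HCI communication
 * is needed (powered off, no actual change, mesh enabled, existing LE
 * connections or an active LE scan) only the flags are toggled and a
 * response is sent directly; otherwise set_adv_sync() is queued.
 */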
6453 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6454 			   u16 len)
6455 {
6456 	struct mgmt_mode *cp = data;
6457 	struct mgmt_pending_cmd *cmd;
6458 	u8 val, status;
6459 	int err;
6460 
6461 	bt_dev_dbg(hdev, "sock %p", sk);
6462 
6463 	status = mgmt_le_support(hdev);
6464 	if (status)
6465 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6466 				       status);
6467 
6468 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6469 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6470 				       MGMT_STATUS_INVALID_PARAMS);
6471 
6472 	if (hdev->advertising_paused)
6473 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6474 				       MGMT_STATUS_BUSY);
6475 
6476 	hci_dev_lock(hdev);
6477 
6478 	val = !!cp->val;
6479 
6480 	/* Under the following conditions we should not do any HCI
6481 	 * communication at all, but instead directly send a mgmt
6482 	 * response to user space (after toggling the flag if
6483 	 * necessary).
6484 	 */
6485 	if (!hdev_is_powered(hdev) ||
6486 	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6487 	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6488 	    hci_dev_test_flag(hdev, HCI_MESH) ||
6489 	    hci_conn_num(hdev, LE_LINK) > 0 ||
6490 	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6491 	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6492 		bool changed;
6493 
6494 		if (cp->val) {
6495 			hdev->cur_adv_instance = 0x00;
6496 			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6497 			if (cp->val == 0x02)
6498 				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6499 			else
6500 				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6501 		} else {
6502 			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6503 			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6504 		}
6505 
6506 		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6507 		if (err < 0)
6508 			goto unlock;
6509 
6510 		if (changed)
6511 			err = new_settings(hdev, sk);
6512 
6513 		goto unlock;
6514 	}
6515 
6516 	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6517 	    pending_find(MGMT_OP_SET_LE, hdev)) {
6518 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6519 				      MGMT_STATUS_BUSY);
6520 		goto unlock;
6521 	}
6522 
6523 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6524 	if (!cmd)
6525 		err = -ENOMEM;
6526 	else
6527 		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6528 					 set_advertising_complete);
6529 
6530 	if (err < 0 && cmd)
6531 		mgmt_pending_remove(cmd);
6532 
6533 unlock:
6534 	hci_dev_unlock(hdev);
6535 	return err;
6536 }
6537 
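/* Set Static Address only updates the stored address and is rejected
 * while the controller is powered on. A non-zero address must have
 * the two most significant bits set, as required for a static random
 * address.
 */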
6538 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6539 			      void *data, u16 len)
6540 {
6541 	struct mgmt_cp_set_static_address *cp = data;
6542 	int err;
6543 
6544 	bt_dev_dbg(hdev, "sock %p", sk);
6545 
6546 	if (!lmp_le_capable(hdev))
6547 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6548 				       MGMT_STATUS_NOT_SUPPORTED);
6549 
6550 	if (hdev_is_powered(hdev))
6551 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6552 				       MGMT_STATUS_REJECTED);
6553 
6554 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6555 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6556 			return mgmt_cmd_status(sk, hdev->id,
6557 					       MGMT_OP_SET_STATIC_ADDRESS,
6558 					       MGMT_STATUS_INVALID_PARAMS);
6559 
6560 		/* Two most significant bits shall be set */
6561 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6562 			return mgmt_cmd_status(sk, hdev->id,
6563 					       MGMT_OP_SET_STATIC_ADDRESS,
6564 					       MGMT_STATUS_INVALID_PARAMS);
6565 	}
6566 
6567 	hci_dev_lock(hdev);
6568 
6569 	bacpy(&hdev->static_addr, &cp->bdaddr);
6570 
6571 	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6572 	if (err < 0)
6573 		goto unlock;
6574 
6575 	err = new_settings(hdev, sk);
6576 
6577 unlock:
6578 	hci_dev_unlock(hdev);
6579 	return err;
6580 }
6581 
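/* Set Scan Parameters updates the LE scan interval and window. Both
 * values are range checked (0x0004 to 0x4000, in units of 0.625 ms)
 * and the window must not exceed the interval. A running background
 * scan is restarted so the new parameters take effect.
 */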
6582 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6583 			   void *data, u16 len)
6584 {
6585 	struct mgmt_cp_set_scan_params *cp = data;
6586 	__u16 interval, window;
6587 	int err;
6588 
6589 	bt_dev_dbg(hdev, "sock %p", sk);
6590 
6591 	if (!lmp_le_capable(hdev))
6592 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6593 				       MGMT_STATUS_NOT_SUPPORTED);
6594 
6595 	interval = __le16_to_cpu(cp->interval);
6596 
6597 	if (interval < 0x0004 || interval > 0x4000)
6598 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6599 				       MGMT_STATUS_INVALID_PARAMS);
6600 
6601 	window = __le16_to_cpu(cp->window);
6602 
6603 	if (window < 0x0004 || window > 0x4000)
6604 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6605 				       MGMT_STATUS_INVALID_PARAMS);
6606 
6607 	if (window > interval)
6608 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6609 				       MGMT_STATUS_INVALID_PARAMS);
6610 
6611 	hci_dev_lock(hdev);
6612 
6613 	hdev->le_scan_interval = interval;
6614 	hdev->le_scan_window = window;
6615 
6616 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6617 				NULL, 0);
6618 
6619 	/* If background scan is running, restart it so new parameters are
6620 	 * loaded.
6621 	 */
6622 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6623 	    hdev->discovery.state == DISCOVERY_STOPPED)
6624 		hci_update_passive_scan(hdev);
6625 
6626 	hci_dev_unlock(hdev);
6627 
6628 	return err;
6629 }
6630 
6631 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6632 {
6633 	struct mgmt_pending_cmd *cmd = data;
6634 
6635 	bt_dev_dbg(hdev, "err %d", err);
6636 
6637 	if (err) {
6638 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6639 				mgmt_status(err));
6640 	} else {
6641 		struct mgmt_mode *cp = cmd->param;
6642 
6643 		if (cp->val)
6644 			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6645 		else
6646 			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6647 
6648 		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6649 		new_settings(hdev, cmd->sk);
6650 	}
6651 
6652 	mgmt_pending_free(cmd);
6653 }
6654 
6655 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6656 {
6657 	struct mgmt_pending_cmd *cmd = data;
6658 	struct mgmt_mode *cp = cmd->param;
6659 
6660 	return hci_write_fast_connectable_sync(hdev, cp->val);
6661 }
6662 
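/* Set Fast Connectable toggles the fast connectable page scan
 * parameters. This requires BR/EDR to be enabled on a controller of
 * at least Bluetooth 1.2; when powered off only the
 * HCI_FAST_CONNECTABLE flag is changed.
 */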
6663 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6664 				void *data, u16 len)
6665 {
6666 	struct mgmt_mode *cp = data;
6667 	struct mgmt_pending_cmd *cmd;
6668 	int err;
6669 
6670 	bt_dev_dbg(hdev, "sock %p", sk);
6671 
6672 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6673 	    hdev->hci_ver < BLUETOOTH_VER_1_2)
6674 		return mgmt_cmd_status(sk, hdev->id,
6675 				       MGMT_OP_SET_FAST_CONNECTABLE,
6676 				       MGMT_STATUS_NOT_SUPPORTED);
6677 
6678 	if (cp->val != 0x00 && cp->val != 0x01)
6679 		return mgmt_cmd_status(sk, hdev->id,
6680 				       MGMT_OP_SET_FAST_CONNECTABLE,
6681 				       MGMT_STATUS_INVALID_PARAMS);
6682 
6683 	hci_dev_lock(hdev);
6684 
6685 	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6686 		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6687 		goto unlock;
6688 	}
6689 
6690 	if (!hdev_is_powered(hdev)) {
6691 		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6692 		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6693 		new_settings(hdev, sk);
6694 		goto unlock;
6695 	}
6696 
6697 	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6698 			       len);
6699 	if (!cmd)
6700 		err = -ENOMEM;
6701 	else
6702 		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6703 					 fast_connectable_complete);
6704 
6705 	if (err < 0) {
6706 		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6707 				MGMT_STATUS_FAILED);
6708 
6709 		if (cmd)
6710 			mgmt_pending_free(cmd);
6711 	}
6712 
6713 unlock:
6714 	hci_dev_unlock(hdev);
6715 
6716 	return err;
6717 }
6718 
6719 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6720 {
6721 	struct mgmt_pending_cmd *cmd = data;
6722 
6723 	bt_dev_dbg(hdev, "err %d", err);
6724 
6725 	if (err) {
6726 		u8 mgmt_err = mgmt_status(err);
6727 
6728 		/* We need to restore the flag if related HCI commands
6729 		 * failed.
6730 		 */
6731 		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6732 
6733 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6734 	} else {
6735 		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6736 		new_settings(hdev, cmd->sk);
6737 	}
6738 
6739 	mgmt_pending_free(cmd);
6740 }
6741 
6742 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6743 {
6744 	int status;
6745 
6746 	status = hci_write_fast_connectable_sync(hdev, false);
6747 
6748 	if (!status)
6749 		status = hci_update_scan_sync(hdev);
6750 
6751 	/* Since only the advertising data flags will change, there
6752 	 * is no need to update the scan response data.
6753 	 */
6754 	if (!status)
6755 		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6756 
6757 	return status;
6758 }
6759 
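/* Set BR/EDR toggles BR/EDR support on a dual-mode controller. While
 * powered on only enabling is allowed, and even that is rejected for
 * an LE-only configuration with a static address or with Secure
 * Connections enabled (see the comment further down).
 */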
6760 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6761 {
6762 	struct mgmt_mode *cp = data;
6763 	struct mgmt_pending_cmd *cmd;
6764 	int err;
6765 
6766 	bt_dev_dbg(hdev, "sock %p", sk);
6767 
6768 	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6769 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6770 				       MGMT_STATUS_NOT_SUPPORTED);
6771 
6772 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6773 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6774 				       MGMT_STATUS_REJECTED);
6775 
6776 	if (cp->val != 0x00 && cp->val != 0x01)
6777 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6778 				       MGMT_STATUS_INVALID_PARAMS);
6779 
6780 	hci_dev_lock(hdev);
6781 
6782 	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6783 		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6784 		goto unlock;
6785 	}
6786 
6787 	if (!hdev_is_powered(hdev)) {
6788 		if (!cp->val) {
6789 			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6790 			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6791 			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6792 			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6793 		}
6794 
6795 		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6796 
6797 		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6798 		if (err < 0)
6799 			goto unlock;
6800 
6801 		err = new_settings(hdev, sk);
6802 		goto unlock;
6803 	}
6804 
6805 	/* Reject disabling when powered on */
6806 	if (!cp->val) {
6807 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6808 				      MGMT_STATUS_REJECTED);
6809 		goto unlock;
6810 	} else {
6811 		/* When configuring a dual-mode controller to operate
6812 		 * with LE only and using a static address, then switching
6813 		 * BR/EDR back on is not allowed.
6814 		 *
6815 		 * Dual-mode controllers shall operate with the public
6816 		 * address as their identity address for BR/EDR and LE. So
6817 		 * reject the attempt to create an invalid configuration.
6818 		 *
6819 		 * The same restriction applies when Secure Connections
6820 		 * has been enabled: for BR/EDR this is a controller feature,
6821 		 * while for LE it is a host stack feature. This means that
6822 		 * switching BR/EDR back on after Secure Connections has
6823 		 * been enabled is not a supported transaction.
6824 		 */
6825 		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6826 		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6827 		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6828 			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6829 					      MGMT_STATUS_REJECTED);
6830 			goto unlock;
6831 		}
6832 	}
6833 
6834 	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6835 	if (!cmd)
6836 		err = -ENOMEM;
6837 	else
6838 		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6839 					 set_bredr_complete);
6840 
6841 	if (err < 0) {
6842 		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6843 				MGMT_STATUS_FAILED);
6844 		if (cmd)
6845 			mgmt_pending_free(cmd);
6846 
6847 		goto unlock;
6848 	}
6849 
6850 	/* The flag needs to be flipped already at this point so that
6851 	 * hci_req_update_adv_data() generates the correct flags.
6852 	 */
6853 	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6854 
6855 unlock:
6856 	hci_dev_unlock(hdev);
6857 	return err;
6858 }
6859 
6860 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6861 {
6862 	struct mgmt_pending_cmd *cmd = data;
6863 	struct mgmt_mode *cp;
6864 
6865 	bt_dev_dbg(hdev, "err %d", err);
6866 
6867 	if (err) {
6868 		u8 mgmt_err = mgmt_status(err);
6869 
6870 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6871 		goto done;
6872 	}
6873 
6874 	cp = cmd->param;
6875 
6876 	switch (cp->val) {
6877 	case 0x00:
6878 		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6879 		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6880 		break;
6881 	case 0x01:
6882 		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6883 		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6884 		break;
6885 	case 0x02:
6886 		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6887 		hci_dev_set_flag(hdev, HCI_SC_ONLY);
6888 		break;
6889 	}
6890 
6891 	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6892 	new_settings(hdev, cmd->sk);
6893 
6894 done:
6895 	mgmt_pending_free(cmd);
6896 }
6897 
6898 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6899 {
6900 	struct mgmt_pending_cmd *cmd = data;
6901 	struct mgmt_mode *cp = cmd->param;
6902 	u8 val = !!cp->val;
6903 
6904 	/* Force write of val */
6905 	hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6906 
6907 	return hci_write_sc_support_sync(hdev, val);
6908 }
6909 
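/* Set Secure Connections handler: 0x00 disables SC, 0x01 enables it
 * and 0x02 enables SC-only mode. If the setting cannot take immediate
 * effect on the controller (powered off, not SC capable or BR/EDR
 * disabled) only the flags are updated.
 */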
6910 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6911 			   void *data, u16 len)
6912 {
6913 	struct mgmt_mode *cp = data;
6914 	struct mgmt_pending_cmd *cmd;
6915 	u8 val;
6916 	int err;
6917 
6918 	bt_dev_dbg(hdev, "sock %p", sk);
6919 
6920 	if (!lmp_sc_capable(hdev) &&
6921 	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6922 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6923 				       MGMT_STATUS_NOT_SUPPORTED);
6924 
6925 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6926 	    lmp_sc_capable(hdev) &&
6927 	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6928 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6929 				       MGMT_STATUS_REJECTED);
6930 
6931 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6932 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6933 				       MGMT_STATUS_INVALID_PARAMS);
6934 
6935 	hci_dev_lock(hdev);
6936 
6937 	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6938 	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6939 		bool changed;
6940 
6941 		if (cp->val) {
6942 			changed = !hci_dev_test_and_set_flag(hdev,
6943 							     HCI_SC_ENABLED);
6944 			if (cp->val == 0x02)
6945 				hci_dev_set_flag(hdev, HCI_SC_ONLY);
6946 			else
6947 				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6948 		} else {
6949 			changed = hci_dev_test_and_clear_flag(hdev,
6950 							      HCI_SC_ENABLED);
6951 			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6952 		}
6953 
6954 		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6955 		if (err < 0)
6956 			goto failed;
6957 
6958 		if (changed)
6959 			err = new_settings(hdev, sk);
6960 
6961 		goto failed;
6962 	}
6963 
6964 	val = !!cp->val;
6965 
6966 	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6967 	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6968 		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6969 		goto failed;
6970 	}
6971 
6972 	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6973 	if (!cmd)
6974 		err = -ENOMEM;
6975 	else
6976 		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6977 					 set_secure_conn_complete);
6978 
6979 	if (err < 0) {
6980 		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6981 				MGMT_STATUS_FAILED);
6982 		if (cmd)
6983 			mgmt_pending_free(cmd);
6984 	}
6985 
6986 failed:
6987 	hci_dev_unlock(hdev);
6988 	return err;
6989 }
6990 
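/* Set Debug Keys handler: 0x00 discards stored debug link keys, 0x01
 * keeps them and 0x02 additionally instructs the controller to use
 * the debug key pair for Secure Simple Pairing via
 * HCI_OP_WRITE_SSP_DEBUG_MODE.
 */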
6991 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6992 			  void *data, u16 len)
6993 {
6994 	struct mgmt_mode *cp = data;
6995 	bool changed, use_changed;
6996 	int err;
6997 
6998 	bt_dev_dbg(hdev, "sock %p", sk);
6999 
7000 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
7001 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
7002 				       MGMT_STATUS_INVALID_PARAMS);
7003 
7004 	hci_dev_lock(hdev);
7005 
7006 	if (cp->val)
7007 		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
7008 	else
7009 		changed = hci_dev_test_and_clear_flag(hdev,
7010 						      HCI_KEEP_DEBUG_KEYS);
7011 
7012 	if (cp->val == 0x02)
7013 		use_changed = !hci_dev_test_and_set_flag(hdev,
7014 							 HCI_USE_DEBUG_KEYS);
7015 	else
7016 		use_changed = hci_dev_test_and_clear_flag(hdev,
7017 							  HCI_USE_DEBUG_KEYS);
7018 
7019 	if (hdev_is_powered(hdev) && use_changed &&
7020 	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7021 		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
7022 		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
7023 			     sizeof(mode), &mode);
7024 	}
7025 
7026 	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
7027 	if (err < 0)
7028 		goto unlock;
7029 
7030 	if (changed)
7031 		err = new_settings(hdev, sk);
7032 
7033 unlock:
7034 	hci_dev_unlock(hdev);
7035 	return err;
7036 }
7037 
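/* Set Privacy stores the local IRK and controls the use of resolvable
 * private addresses: 0x00 disables privacy, 0x01 enables it and 0x02
 * enables limited privacy. The command is only accepted while the
 * controller is powered off.
 */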
7038 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7039 		       u16 len)
7040 {
7041 	struct mgmt_cp_set_privacy *cp = cp_data;
7042 	bool changed;
7043 	int err;
7044 
7045 	bt_dev_dbg(hdev, "sock %p", sk);
7046 
7047 	if (!lmp_le_capable(hdev))
7048 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7049 				       MGMT_STATUS_NOT_SUPPORTED);
7050 
7051 	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
7052 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7053 				       MGMT_STATUS_INVALID_PARAMS);
7054 
7055 	if (hdev_is_powered(hdev))
7056 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7057 				       MGMT_STATUS_REJECTED);
7058 
7059 	hci_dev_lock(hdev);
7060 
7061 	/* If user space supports this command it is also expected to
7062 	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
7063 	 */
7064 	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7065 
7066 	if (cp->privacy) {
7067 		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
7068 		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
7069 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
7070 		hci_adv_instances_set_rpa_expired(hdev, true);
7071 		if (cp->privacy == 0x02)
7072 			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
7073 		else
7074 			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7075 	} else {
7076 		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
7077 		memset(hdev->irk, 0, sizeof(hdev->irk));
7078 		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
7079 		hci_adv_instances_set_rpa_expired(hdev, false);
7080 		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7081 	}
7082 
7083 	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
7084 	if (err < 0)
7085 		goto unlock;
7086 
7087 	if (changed)
7088 		err = new_settings(hdev, sk);
7089 
7090 unlock:
7091 	hci_dev_unlock(hdev);
7092 	return err;
7093 }
7094 
7095 static bool irk_is_valid(struct mgmt_irk_info *irk)
7096 {
7097 	switch (irk->addr.type) {
7098 	case BDADDR_LE_PUBLIC:
7099 		return true;
7100 
7101 	case BDADDR_LE_RANDOM:
7102 		/* Two most significant bits shall be set */
7103 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7104 			return false;
7105 		return true;
7106 	}
7107 
7108 	return false;
7109 }
7110 
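/* Load IRKs replaces the complete set of stored Identity Resolving
 * Keys. Every entry is validated first (a random address must be a
 * static address), blocked keys are skipped with a warning and the
 * HCI_RPA_RESOLVING flag is set since user space that loads IRKs is
 * expected to handle resolvable private addresses.
 */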
7111 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7112 		     u16 len)
7113 {
7114 	struct mgmt_cp_load_irks *cp = cp_data;
7115 	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7116 				   sizeof(struct mgmt_irk_info));
7117 	u16 irk_count, expected_len;
7118 	int i, err;
7119 
7120 	bt_dev_dbg(hdev, "sock %p", sk);
7121 
7122 	if (!lmp_le_capable(hdev))
7123 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7124 				       MGMT_STATUS_NOT_SUPPORTED);
7125 
7126 	irk_count = __le16_to_cpu(cp->irk_count);
7127 	if (irk_count > max_irk_count) {
7128 		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7129 			   irk_count);
7130 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7131 				       MGMT_STATUS_INVALID_PARAMS);
7132 	}
7133 
7134 	expected_len = struct_size(cp, irks, irk_count);
7135 	if (expected_len != len) {
7136 		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7137 			   expected_len, len);
7138 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7139 				       MGMT_STATUS_INVALID_PARAMS);
7140 	}
7141 
7142 	bt_dev_dbg(hdev, "irk_count %u", irk_count);
7143 
7144 	for (i = 0; i < irk_count; i++) {
7145 		struct mgmt_irk_info *key = &cp->irks[i];
7146 
7147 		if (!irk_is_valid(key))
7148 			return mgmt_cmd_status(sk, hdev->id,
7149 					       MGMT_OP_LOAD_IRKS,
7150 					       MGMT_STATUS_INVALID_PARAMS);
7151 	}
7152 
7153 	hci_dev_lock(hdev);
7154 
7155 	hci_smp_irks_clear(hdev);
7156 
7157 	for (i = 0; i < irk_count; i++) {
7158 		struct mgmt_irk_info *irk = &cp->irks[i];
7159 
7160 		if (hci_is_blocked_key(hdev,
7161 				       HCI_BLOCKED_KEY_TYPE_IRK,
7162 				       irk->val)) {
7163 			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7164 				    &irk->addr.bdaddr);
7165 			continue;
7166 		}
7167 
7168 		hci_add_irk(hdev, &irk->addr.bdaddr,
7169 			    le_addr_type(irk->addr.type), irk->val,
7170 			    BDADDR_ANY);
7171 	}
7172 
7173 	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7174 
7175 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7176 
7177 	hci_dev_unlock(hdev);
7178 
7179 	return err;
7180 }
7181 
7182 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7183 {
7184 	if (key->initiator != 0x00 && key->initiator != 0x01)
7185 		return false;
7186 
7187 	switch (key->addr.type) {
7188 	case BDADDR_LE_PUBLIC:
7189 		return true;
7190 
7191 	case BDADDR_LE_RANDOM:
7192 		/* Two most significant bits shall be set */
7193 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7194 			return false;
7195 		return true;
7196 	}
7197 
7198 	return false;
7199 }
7200 
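/* Load Long Term Keys replaces the complete set of stored LTKs.
 * Unlike Load IRKs, invalid and blocked entries are skipped with a
 * warning instead of failing the whole command, and P-256 debug keys
 * are never loaded.
 */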
7201 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7202 			       void *cp_data, u16 len)
7203 {
7204 	struct mgmt_cp_load_long_term_keys *cp = cp_data;
7205 	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7206 				   sizeof(struct mgmt_ltk_info));
7207 	u16 key_count, expected_len;
7208 	int i, err;
7209 
7210 	bt_dev_dbg(hdev, "sock %p", sk);
7211 
7212 	if (!lmp_le_capable(hdev))
7213 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7214 				       MGMT_STATUS_NOT_SUPPORTED);
7215 
7216 	key_count = __le16_to_cpu(cp->key_count);
7217 	if (key_count > max_key_count) {
7218 		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7219 			   key_count);
7220 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7221 				       MGMT_STATUS_INVALID_PARAMS);
7222 	}
7223 
7224 	expected_len = struct_size(cp, keys, key_count);
7225 	if (expected_len != len) {
7226 		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
7227 			   expected_len, len);
7228 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7229 				       MGMT_STATUS_INVALID_PARAMS);
7230 	}
7231 
7232 	bt_dev_dbg(hdev, "key_count %u", key_count);
7233 
7234 	hci_dev_lock(hdev);
7235 
7236 	hci_smp_ltks_clear(hdev);
7237 
7238 	for (i = 0; i < key_count; i++) {
7239 		struct mgmt_ltk_info *key = &cp->keys[i];
7240 		u8 type, authenticated;
7241 
7242 		if (hci_is_blocked_key(hdev,
7243 				       HCI_BLOCKED_KEY_TYPE_LTK,
7244 				       key->val)) {
7245 			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
7246 				    &key->addr.bdaddr);
7247 			continue;
7248 		}
7249 
7250 		if (!ltk_is_valid(key)) {
7251 			bt_dev_warn(hdev, "Invalid LTK for %pMR",
7252 				    &key->addr.bdaddr);
7253 			continue;
7254 		}
7255 
7256 		switch (key->type) {
7257 		case MGMT_LTK_UNAUTHENTICATED:
7258 			authenticated = 0x00;
7259 			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7260 			break;
7261 		case MGMT_LTK_AUTHENTICATED:
7262 			authenticated = 0x01;
7263 			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7264 			break;
7265 		case MGMT_LTK_P256_UNAUTH:
7266 			authenticated = 0x00;
7267 			type = SMP_LTK_P256;
7268 			break;
7269 		case MGMT_LTK_P256_AUTH:
7270 			authenticated = 0x01;
7271 			type = SMP_LTK_P256;
7272 			break;
7273 		case MGMT_LTK_P256_DEBUG:
7274 			authenticated = 0x00;
7275 			type = SMP_LTK_P256_DEBUG;
7276 			fallthrough;	/* debug keys are never loaded */
7277 		default:
7278 			continue;
7279 		}
7280 
7281 		hci_add_ltk(hdev, &key->addr.bdaddr,
7282 			    le_addr_type(key->addr.type), type, authenticated,
7283 			    key->val, key->enc_size, key->ediv, key->rand);
7284 	}
7285 
7286 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7287 			   NULL, 0);
7288 
7289 	hci_dev_unlock(hdev);
7290 
7291 	return err;
7292 }
7293 
7294 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7295 {
7296 	struct mgmt_pending_cmd *cmd = data;
7297 	struct hci_conn *conn = cmd->user_data;
7298 	struct mgmt_cp_get_conn_info *cp = cmd->param;
7299 	struct mgmt_rp_get_conn_info rp;
7300 	u8 status;
7301 
7302 	bt_dev_dbg(hdev, "err %d", err);
7303 
7304 	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7305 
7306 	status = mgmt_status(err);
7307 	if (status == MGMT_STATUS_SUCCESS) {
7308 		rp.rssi = conn->rssi;
7309 		rp.tx_power = conn->tx_power;
7310 		rp.max_tx_power = conn->max_tx_power;
7311 	} else {
7312 		rp.rssi = HCI_RSSI_INVALID;
7313 		rp.tx_power = HCI_TX_POWER_INVALID;
7314 		rp.max_tx_power = HCI_TX_POWER_INVALID;
7315 	}
7316 
7317 	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
7318 			  &rp, sizeof(rp));
7319 
7320 	mgmt_pending_free(cmd);
7321 }
7322 
7323 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7324 {
7325 	struct mgmt_pending_cmd *cmd = data;
7326 	struct mgmt_cp_get_conn_info *cp = cmd->param;
7327 	struct hci_conn *conn;
7328 	int err;
7329 	__le16 handle;
7330 
7331 	/* Make sure we are still connected */
7332 	if (cp->addr.type == BDADDR_BREDR)
7333 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7334 					       &cp->addr.bdaddr);
7335 	else
7336 		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7337 
7338 	if (!conn || conn->state != BT_CONNECTED)
7339 		return MGMT_STATUS_NOT_CONNECTED;
7340 
7341 	cmd->user_data = conn;
7342 	handle = cpu_to_le16(conn->handle);
7343 
7344 	/* Refresh RSSI each time */
7345 	err = hci_read_rssi_sync(hdev, handle);
7346 
7347 	/* For LE links the TX power does not change, so there is no need
7348 	 * to query for it again once the value is known.
7349 	 */
7350 	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7351 		     conn->tx_power == HCI_TX_POWER_INVALID))
7352 		err = hci_read_tx_power_sync(hdev, handle, 0x00);
7353 
7354 	/* Max TX power needs to be read only once per connection */
7355 	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7356 		err = hci_read_tx_power_sync(hdev, handle, 0x01);
7357 
7358 	return err;
7359 }
7360 
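/* Get Connection Information returns the RSSI and TX power of an
 * active connection. Values are served from the cache in hci_conn
 * while they are recent enough; otherwise a refresh from the
 * controller is queued and the reply is sent from
 * get_conn_info_complete().
 */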
7361 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7362 			 u16 len)
7363 {
7364 	struct mgmt_cp_get_conn_info *cp = data;
7365 	struct mgmt_rp_get_conn_info rp;
7366 	struct hci_conn *conn;
7367 	unsigned long conn_info_age;
7368 	int err = 0;
7369 
7370 	bt_dev_dbg(hdev, "sock %p", sk);
7371 
7372 	memset(&rp, 0, sizeof(rp));
7373 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7374 	rp.addr.type = cp->addr.type;
7375 
7376 	if (!bdaddr_type_is_valid(cp->addr.type))
7377 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7378 					 MGMT_STATUS_INVALID_PARAMS,
7379 					 &rp, sizeof(rp));
7380 
7381 	hci_dev_lock(hdev);
7382 
7383 	if (!hdev_is_powered(hdev)) {
7384 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7385 					MGMT_STATUS_NOT_POWERED, &rp,
7386 					sizeof(rp));
7387 		goto unlock;
7388 	}
7389 
7390 	if (cp->addr.type == BDADDR_BREDR)
7391 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7392 					       &cp->addr.bdaddr);
7393 	else
7394 		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7395 
7396 	if (!conn || conn->state != BT_CONNECTED) {
7397 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7398 					MGMT_STATUS_NOT_CONNECTED, &rp,
7399 					sizeof(rp));
7400 		goto unlock;
7401 	}
7402 
7403 	/* To keep the client from guessing when to poll again, calculate the
7404 	 * conn info age as a random value between the min/max set in hdev.
7405 	 */
7406 	conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
7407 						 hdev->conn_info_max_age - 1);
7408 
7409 	/* Query the controller to refresh the cached values if they are too
7410 	 * old or were never read.
7411 	 */
7412 	if (time_after(jiffies, conn->conn_info_timestamp +
7413 		       msecs_to_jiffies(conn_info_age)) ||
7414 	    !conn->conn_info_timestamp) {
7415 		struct mgmt_pending_cmd *cmd;
7416 
7417 		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
7418 				       len);
7419 		if (!cmd) {
7420 			err = -ENOMEM;
7421 		} else {
7422 			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
7423 						 cmd, get_conn_info_complete);
7424 		}
7425 
7426 		if (err < 0) {
7427 			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7428 					  MGMT_STATUS_FAILED, &rp, sizeof(rp));
7429 
7430 			if (cmd)
7431 				mgmt_pending_free(cmd);
7432 
7433 			goto unlock;
7434 		}
7435 
7436 		conn->conn_info_timestamp = jiffies;
7437 	} else {
7438 		/* Cache is valid, just reply with values cached in hci_conn */
7439 		rp.rssi = conn->rssi;
7440 		rp.tx_power = conn->tx_power;
7441 		rp.max_tx_power = conn->max_tx_power;
7442 
7443 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7444 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7445 	}
7446 
7447 unlock:
7448 	hci_dev_unlock(hdev);
7449 	return err;
7450 }
7451 
7452 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7453 {
7454 	struct mgmt_pending_cmd *cmd = data;
7455 	struct mgmt_cp_get_clock_info *cp = cmd->param;
7456 	struct mgmt_rp_get_clock_info rp;
7457 	struct hci_conn *conn = cmd->user_data;
7458 	u8 status = mgmt_status(err);
7459 
7460 	bt_dev_dbg(hdev, "err %d", err);
7461 
7462 	memset(&rp, 0, sizeof(rp));
7463 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7464 	rp.addr.type = cp->addr.type;
7465 
7466 	if (err)
7467 		goto complete;
7468 
7469 	rp.local_clock = cpu_to_le32(hdev->clock);
7470 
7471 	if (conn) {
7472 		rp.piconet_clock = cpu_to_le32(conn->clock);
7473 		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7474 	}
7475 
7476 complete:
7477 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
7478 			  sizeof(rp));
7479 
7480 	mgmt_pending_free(cmd);
7481 }
7482 
7483 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
7484 {
7485 	struct mgmt_pending_cmd *cmd = data;
7486 	struct mgmt_cp_get_clock_info *cp = cmd->param;
7487 	struct hci_cp_read_clock hci_cp;
7488 	struct hci_conn *conn;
7489 
7490 	memset(&hci_cp, 0, sizeof(hci_cp));
7491 	hci_read_clock_sync(hdev, &hci_cp);
7492 
7493 	/* Make sure connection still exists */
7494 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7495 	if (!conn || conn->state != BT_CONNECTED)
7496 		return MGMT_STATUS_NOT_CONNECTED;
7497 
7498 	cmd->user_data = conn;
7499 	hci_cp.handle = cpu_to_le16(conn->handle);
7500 	hci_cp.which = 0x01; /* Piconet clock */
7501 
7502 	return hci_read_clock_sync(hdev, &hci_cp);
7503 }
7504 
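/* Get Clock Information reads the local clock and, for an active
 * BR/EDR connection, the piconet clock and its accuracy.
 */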
7505 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7506 								u16 len)
7507 {
7508 	struct mgmt_cp_get_clock_info *cp = data;
7509 	struct mgmt_rp_get_clock_info rp;
7510 	struct mgmt_pending_cmd *cmd;
7511 	struct hci_conn *conn;
7512 	int err;
7513 
7514 	bt_dev_dbg(hdev, "sock %p", sk);
7515 
7516 	memset(&rp, 0, sizeof(rp));
7517 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7518 	rp.addr.type = cp->addr.type;
7519 
7520 	if (cp->addr.type != BDADDR_BREDR)
7521 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7522 					 MGMT_STATUS_INVALID_PARAMS,
7523 					 &rp, sizeof(rp));
7524 
7525 	hci_dev_lock(hdev);
7526 
7527 	if (!hdev_is_powered(hdev)) {
7528 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7529 					MGMT_STATUS_NOT_POWERED, &rp,
7530 					sizeof(rp));
7531 		goto unlock;
7532 	}
7533 
7534 	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7535 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7536 					       &cp->addr.bdaddr);
7537 		if (!conn || conn->state != BT_CONNECTED) {
7538 			err = mgmt_cmd_complete(sk, hdev->id,
7539 						MGMT_OP_GET_CLOCK_INFO,
7540 						MGMT_STATUS_NOT_CONNECTED,
7541 						&rp, sizeof(rp));
7542 			goto unlock;
7543 		}
7544 	} else {
7545 		conn = NULL;
7546 	}
7547 
7548 	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7549 	if (!cmd)
7550 		err = -ENOMEM;
7551 	else
7552 		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7553 					 get_clock_info_complete);
7554 
7555 	if (err < 0) {
7556 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7557 					MGMT_STATUS_FAILED, &rp, sizeof(rp));
7558 
7559 		if (cmd)
7560 			mgmt_pending_free(cmd);
7561 	}
7562 
7564 unlock:
7565 	hci_dev_unlock(hdev);
7566 	return err;
7567 }
7568 
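/* Return true only if an LE link to the given address exists, matches
 * the requested destination address type and is in BT_CONNECTED state.
 */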
7569 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7570 {
7571 	struct hci_conn *conn;
7572 
7573 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7574 	if (!conn)
7575 		return false;
7576 
7577 	if (conn->dst_type != type)
7578 		return false;
7579 
7580 	if (conn->state != BT_CONNECTED)
7581 		return false;
7582 
7583 	return true;
7584 }
7585 
7586 /* This function requires the caller holds hdev->lock */
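/* Re-files the params entry on hdev->pend_le_conns or
 * hdev->pend_le_reports according to the requested auto_connect policy,
 * keeping entries with a pending explicit connect on the connect list.
 */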
7587 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7588 			       u8 addr_type, u8 auto_connect)
7589 {
7590 	struct hci_conn_params *params;
7591 
7592 	params = hci_conn_params_add(hdev, addr, addr_type);
7593 	if (!params)
7594 		return -EIO;
7595 
7596 	if (params->auto_connect == auto_connect)
7597 		return 0;
7598 
7599 	hci_pend_le_list_del_init(params);
7600 
7601 	switch (auto_connect) {
7602 	case HCI_AUTO_CONN_DISABLED:
7603 	case HCI_AUTO_CONN_LINK_LOSS:
7604 		/* If auto connect is being disabled while we're trying to
7605 		 * connect to the device, keep connecting.
7606 		 */
7607 		if (params->explicit_connect)
7608 			hci_pend_le_list_add(params, &hdev->pend_le_conns);
7609 		break;
7610 	case HCI_AUTO_CONN_REPORT:
7611 		if (params->explicit_connect)
7612 			hci_pend_le_list_add(params, &hdev->pend_le_conns);
7613 		else
7614 			hci_pend_le_list_add(params, &hdev->pend_le_reports);
7615 		break;
7616 	case HCI_AUTO_CONN_DIRECT:
7617 	case HCI_AUTO_CONN_ALWAYS:
7618 		if (!is_connected(hdev, addr, addr_type))
7619 			hci_pend_le_list_add(params, &hdev->pend_le_conns);
7620 		break;
7621 	}
7622 
7623 	params->auto_connect = auto_connect;
7624 
7625 	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7626 		   addr, addr_type, auto_connect);
7627 
7628 	return 0;
7629 }
7630 
7631 static void device_added(struct sock *sk, struct hci_dev *hdev,
7632 			 bdaddr_t *bdaddr, u8 type, u8 action)
7633 {
7634 	struct mgmt_ev_device_added ev;
7635 
7636 	bacpy(&ev.addr.bdaddr, bdaddr);
7637 	ev.addr.type = type;
7638 	ev.action = action;
7639 
7640 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7641 }
7642 
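/* Runs from the hci_sync request queue; refreshing the passive scan is
 * enough for the newly added entry to take effect.
 */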
7643 static int add_device_sync(struct hci_dev *hdev, void *data)
7644 {
7645 	return hci_update_passive_scan_sync(hdev);
7646 }
7647 
7648 static int add_device(struct sock *sk, struct hci_dev *hdev,
7649 		      void *data, u16 len)
7650 {
7651 	struct mgmt_cp_add_device *cp = data;
7652 	u8 auto_conn, addr_type;
7653 	struct hci_conn_params *params;
7654 	int err;
7655 	u32 current_flags = 0;
7656 	u32 supported_flags;
7657 
7658 	bt_dev_dbg(hdev, "sock %p", sk);
7659 
7660 	if (!bdaddr_type_is_valid(cp->addr.type) ||
7661 	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7662 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7663 					 MGMT_STATUS_INVALID_PARAMS,
7664 					 &cp->addr, sizeof(cp->addr));
7665 
7666 	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7667 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7668 					 MGMT_STATUS_INVALID_PARAMS,
7669 					 &cp->addr, sizeof(cp->addr));
7670 
7671 	hci_dev_lock(hdev);
7672 
7673 	if (cp->addr.type == BDADDR_BREDR) {
7674 		/* Only the incoming-connection action (0x01) is supported for now */
7675 		if (cp->action != 0x01) {
7676 			err = mgmt_cmd_complete(sk, hdev->id,
7677 						MGMT_OP_ADD_DEVICE,
7678 						MGMT_STATUS_INVALID_PARAMS,
7679 						&cp->addr, sizeof(cp->addr));
7680 			goto unlock;
7681 		}
7682 
7683 		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
7684 						     &cp->addr.bdaddr,
7685 						     cp->addr.type, 0);
7686 		if (err)
7687 			goto unlock;
7688 
7689 		hci_update_scan(hdev);
7690 
7691 		goto added;
7692 	}
7693 
7694 	addr_type = le_addr_type(cp->addr.type);
7695 
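	/* Map the Add Device action onto an auto-connect policy: 0x00 only
	 * scans and reports, 0x01 allows an incoming connection and 0x02
	 * connects whenever the device is seen.
	 */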
7696 	if (cp->action == 0x02)
7697 		auto_conn = HCI_AUTO_CONN_ALWAYS;
7698 	else if (cp->action == 0x01)
7699 		auto_conn = HCI_AUTO_CONN_DIRECT;
7700 	else
7701 		auto_conn = HCI_AUTO_CONN_REPORT;
7702 
7703 	/* The kernel internally uses conn_params with resolvable private
7704 	 * addresses, but Add Device allows only identity addresses.
7705 	 * Make sure this is enforced before calling
7706 	 * hci_conn_params_lookup.
7707 	 */
7708 	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7709 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7710 					MGMT_STATUS_INVALID_PARAMS,
7711 					&cp->addr, sizeof(cp->addr));
7712 		goto unlock;
7713 	}
7714 
7715 	/* If the connection parameters don't exist for this device,
7716 	 * they will be created and configured with defaults.
7717 	 */
7718 	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7719 				auto_conn) < 0) {
7720 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7721 					MGMT_STATUS_FAILED, &cp->addr,
7722 					sizeof(cp->addr));
7723 		goto unlock;
7724 	} else {
7725 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7726 						addr_type);
7727 		if (params)
7728 			current_flags = params->flags;
7729 	}
7730 
7731 	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
7732 	if (err < 0)
7733 		goto unlock;
7734 
7735 added:
7736 	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7737 	supported_flags = hdev->conn_flags;
7738 	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7739 			     supported_flags, current_flags);
7740 
7741 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7742 				MGMT_STATUS_SUCCESS, &cp->addr,
7743 				sizeof(cp->addr));
7744 
7745 unlock:
7746 	hci_dev_unlock(hdev);
7747 	return err;
7748 }
7749 
7750 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7751 			   bdaddr_t *bdaddr, u8 type)
7752 {
7753 	struct mgmt_ev_device_removed ev;
7754 
7755 	bacpy(&ev.addr.bdaddr, bdaddr);
7756 	ev.addr.type = type;
7757 
7758 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7759 }
7760 
7761 static int remove_device_sync(struct hci_dev *hdev, void *data)
7762 {
7763 	return hci_update_passive_scan_sync(hdev);
7764 }
7765 
7766 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7767 			 void *data, u16 len)
7768 {
7769 	struct mgmt_cp_remove_device *cp = data;
7770 	int err;
7771 
7772 	bt_dev_dbg(hdev, "sock %p", sk);
7773 
7774 	hci_dev_lock(hdev);
7775 
7776 	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7777 		struct hci_conn_params *params;
7778 		u8 addr_type;
7779 
7780 		if (!bdaddr_type_is_valid(cp->addr.type)) {
7781 			err = mgmt_cmd_complete(sk, hdev->id,
7782 						MGMT_OP_REMOVE_DEVICE,
7783 						MGMT_STATUS_INVALID_PARAMS,
7784 						&cp->addr, sizeof(cp->addr));
7785 			goto unlock;
7786 		}
7787 
7788 		if (cp->addr.type == BDADDR_BREDR) {
7789 			err = hci_bdaddr_list_del(&hdev->accept_list,
7790 						  &cp->addr.bdaddr,
7791 						  cp->addr.type);
7792 			if (err) {
7793 				err = mgmt_cmd_complete(sk, hdev->id,
7794 							MGMT_OP_REMOVE_DEVICE,
7795 							MGMT_STATUS_INVALID_PARAMS,
7796 							&cp->addr,
7797 							sizeof(cp->addr));
7798 				goto unlock;
7799 			}
7800 
7801 			hci_update_scan(hdev);
7802 
7803 			device_removed(sk, hdev, &cp->addr.bdaddr,
7804 				       cp->addr.type);
7805 			goto complete;
7806 		}
7807 
7808 		addr_type = le_addr_type(cp->addr.type);
7809 
7810 		/* The kernel internally uses conn_params with resolvable private
7811 		 * addresses, but Remove Device allows only identity addresses.
7812 		 * Make sure this is enforced before calling
7813 		 * hci_conn_params_lookup.
7814 		 */
7815 		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7816 			err = mgmt_cmd_complete(sk, hdev->id,
7817 						MGMT_OP_REMOVE_DEVICE,
7818 						MGMT_STATUS_INVALID_PARAMS,
7819 						&cp->addr, sizeof(cp->addr));
7820 			goto unlock;
7821 		}
7822 
7823 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7824 						addr_type);
7825 		if (!params) {
7826 			err = mgmt_cmd_complete(sk, hdev->id,
7827 						MGMT_OP_REMOVE_DEVICE,
7828 						MGMT_STATUS_INVALID_PARAMS,
7829 						&cp->addr, sizeof(cp->addr));
7830 			goto unlock;
7831 		}
7832 
7833 		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7834 		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7835 			err = mgmt_cmd_complete(sk, hdev->id,
7836 						MGMT_OP_REMOVE_DEVICE,
7837 						MGMT_STATUS_INVALID_PARAMS,
7838 						&cp->addr, sizeof(cp->addr));
7839 			goto unlock;
7840 		}
7841 
7842 		hci_conn_params_free(params);
7843 
7844 		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
7845 	} else {
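		/* BDADDR_ANY: flush the whole accept list and all LE
		 * connection parameters, keeping disabled entries and
		 * demoting explicit-connect entries instead of freeing
		 * them.
		 */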
7846 		struct hci_conn_params *p, *tmp;
7847 		struct bdaddr_list *b, *btmp;
7848 
7849 		if (cp->addr.type) {
7850 			err = mgmt_cmd_complete(sk, hdev->id,
7851 						MGMT_OP_REMOVE_DEVICE,
7852 						MGMT_STATUS_INVALID_PARAMS,
7853 						&cp->addr, sizeof(cp->addr));
7854 			goto unlock;
7855 		}
7856 
7857 		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7858 			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7859 			list_del(&b->list);
7860 			kfree(b);
7861 		}
7862 
7863 		hci_update_scan(hdev);
7864 
7865 		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7866 			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7867 				continue;
7868 			device_removed(sk, hdev, &p->addr, p->addr_type);
7869 			if (p->explicit_connect) {
7870 				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7871 				continue;
7872 			}
7873 			hci_conn_params_free(p);
7874 		}
7875 
7876 		bt_dev_dbg(hdev, "All LE connection parameters were removed");
7877 	}
7878 
7879 	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7880 
7881 complete:
7882 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7883 				MGMT_STATUS_SUCCESS, &cp->addr,
7884 				sizeof(cp->addr));
7885 unlock:
7886 	hci_dev_unlock(hdev);
7887 	return err;
7888 }
7889 
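/* Runs from the hci_sync request queue; bail out with -ECANCELED if the
 * connection went away before the update could be issued.
 */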
7890 static int conn_update_sync(struct hci_dev *hdev, void *data)
7891 {
7892 	struct hci_conn_params *params = data;
7893 	struct hci_conn *conn;
7894 
7895 	conn = hci_conn_hash_lookup_le(hdev, &params->addr, params->addr_type);
7896 	if (!conn)
7897 		return -ECANCELED;
7898 
7899 	return hci_le_conn_update_sync(hdev, conn, params);
7900 }
7901 
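/* Load a list of LE connection parameters. param_count is bounded so
 * that struct_size() below cannot exceed the u16 message length, and
 * entries that fail validation are skipped rather than failing the
 * whole command.
 */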
7902 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7903 			   u16 len)
7904 {
7905 	struct mgmt_cp_load_conn_param *cp = data;
7906 	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7907 				     sizeof(struct mgmt_conn_param));
7908 	u16 param_count, expected_len;
7909 	int i;
7910 
7911 	if (!lmp_le_capable(hdev))
7912 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7913 				       MGMT_STATUS_NOT_SUPPORTED);
7914 
7915 	param_count = __le16_to_cpu(cp->param_count);
7916 	if (param_count > max_param_count) {
7917 		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7918 			   param_count);
7919 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7920 				       MGMT_STATUS_INVALID_PARAMS);
7921 	}
7922 
7923 	expected_len = struct_size(cp, params, param_count);
7924 	if (expected_len != len) {
7925 		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7926 			   expected_len, len);
7927 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7928 				       MGMT_STATUS_INVALID_PARAMS);
7929 	}
7930 
7931 	bt_dev_dbg(hdev, "param_count %u", param_count);
7932 
7933 	hci_dev_lock(hdev);
7934 
7935 	if (param_count > 1)
7936 		hci_conn_params_clear_disabled(hdev);
7937 
7938 	for (i = 0; i < param_count; i++) {
7939 		struct mgmt_conn_param *param = &cp->params[i];
7940 		struct hci_conn_params *hci_param;
7941 		u16 min, max, latency, timeout;
7942 		bool update = false;
7943 		u8 addr_type;
7944 
7945 		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
7946 			   param->addr.type);
7947 
7948 		if (param->addr.type == BDADDR_LE_PUBLIC) {
7949 			addr_type = ADDR_LE_DEV_PUBLIC;
7950 		} else if (param->addr.type == BDADDR_LE_RANDOM) {
7951 			addr_type = ADDR_LE_DEV_RANDOM;
7952 		} else {
7953 			bt_dev_err(hdev, "ignoring invalid connection parameters");
7954 			continue;
7955 		}
7956 
7957 		min = le16_to_cpu(param->min_interval);
7958 		max = le16_to_cpu(param->max_interval);
7959 		latency = le16_to_cpu(param->latency);
7960 		timeout = le16_to_cpu(param->timeout);
7961 
7962 		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7963 			   min, max, latency, timeout);
7964 
7965 		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7966 			bt_dev_err(hdev, "ignoring invalid connection parameters");
7967 			continue;
7968 		}
7969 
7970 		/* Detect when the load targets an existing parameter entry
7971 		 * and, if so, attempt to trigger the connection update procedure.
7972 		 */
7973 		if (!i && param_count == 1) {
7974 			hci_param = hci_conn_params_lookup(hdev,
7975 							   &param->addr.bdaddr,
7976 							   addr_type);
7977 			if (hci_param)
7978 				update = true;
7979 			else
7980 				hci_conn_params_clear_disabled(hdev);
7981 		}
7982 
7983 		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
7984 						addr_type);
7985 		if (!hci_param) {
7986 			bt_dev_err(hdev, "failed to add connection parameters");
7987 			continue;
7988 		}
7989 
7990 		hci_param->conn_min_interval = min;
7991 		hci_param->conn_max_interval = max;
7992 		hci_param->conn_latency = latency;
7993 		hci_param->supervision_timeout = timeout;
7994 
7995 		/* Check if we need to trigger a connection update */
7996 		if (update) {
7997 			struct hci_conn *conn;
7998 
7999 			/* Look up an existing connection where we are central
8000 			 * and, if the parameters don't match, trigger a
8001 			 * connection update.
8002 			 */
8003 			conn = hci_conn_hash_lookup_le(hdev, &hci_param->addr,
8004 						       addr_type);
8005 			if (conn && conn->role == HCI_ROLE_MASTER &&
8006 			    (conn->le_conn_min_interval != min ||
8007 			     conn->le_conn_max_interval != max ||
8008 			     conn->le_conn_latency != latency ||
8009 			     conn->le_supv_timeout != timeout))
8010 				hci_cmd_sync_queue(hdev, conn_update_sync,
8011 						   hci_param, NULL);
8012 		}
8013 	}
8014 
8015 	hci_dev_unlock(hdev);
8016 
8017 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
8018 				 NULL, 0);
8019 }
8020 
8021 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
8022 			       void *data, u16 len)
8023 {
8024 	struct mgmt_cp_set_external_config *cp = data;
8025 	bool changed;
8026 	int err;
8027 
8028 	bt_dev_dbg(hdev, "sock %p", sk);
8029 
8030 	if (hdev_is_powered(hdev))
8031 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8032 				       MGMT_STATUS_REJECTED);
8033 
8034 	if (cp->config != 0x00 && cp->config != 0x01)
8035 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8036 				       MGMT_STATUS_INVALID_PARAMS);
8037 
8038 	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
8039 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8040 				       MGMT_STATUS_NOT_SUPPORTED);
8041 
8042 	hci_dev_lock(hdev);
8043 
8044 	if (cp->config)
8045 		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
8046 	else
8047 		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
8048 
8049 	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
8050 	if (err < 0)
8051 		goto unlock;
8052 
8053 	if (!changed)
8054 		goto unlock;
8055 
8056 	err = new_options(hdev, sk);
8057 
8058 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
8059 		mgmt_index_removed(hdev);
8060 
8061 		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
8062 			hci_dev_set_flag(hdev, HCI_CONFIG);
8063 			hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8064 
8065 			queue_work(hdev->req_workqueue, &hdev->power_on);
8066 		} else {
8067 			set_bit(HCI_RAW, &hdev->flags);
8068 			mgmt_index_added(hdev);
8069 		}
8070 	}
8071 
8072 unlock:
8073 	hci_dev_unlock(hdev);
8074 	return err;
8075 }
8076 
8077 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
8078 			      void *data, u16 len)
8079 {
8080 	struct mgmt_cp_set_public_address *cp = data;
8081 	bool changed;
8082 	int err;
8083 
8084 	bt_dev_dbg(hdev, "sock %p", sk);
8085 
8086 	if (hdev_is_powered(hdev))
8087 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8088 				       MGMT_STATUS_REJECTED);
8089 
8090 	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
8091 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8092 				       MGMT_STATUS_INVALID_PARAMS);
8093 
8094 	if (!hdev->set_bdaddr)
8095 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8096 				       MGMT_STATUS_NOT_SUPPORTED);
8097 
8098 	hci_dev_lock(hdev);
8099 
8100 	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
8101 	bacpy(&hdev->public_addr, &cp->bdaddr);
8102 
8103 	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
8104 	if (err < 0)
8105 		goto unlock;
8106 
8107 	if (!changed)
8108 		goto unlock;
8109 
8110 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
8111 		err = new_options(hdev, sk);
8112 
8113 	if (is_configured(hdev)) {
8114 		mgmt_index_removed(hdev);
8115 
8116 		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
8117 
8118 		hci_dev_set_flag(hdev, HCI_CONFIG);
8119 		hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8120 
8121 		queue_work(hdev->req_workqueue, &hdev->power_on);
8122 	}
8123 
8124 unlock:
8125 	hci_dev_unlock(hdev);
8126 	return err;
8127 }
8128 
8129 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
8130 					     int err)
8131 {
8132 	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8133 	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8134 	u8 *h192, *r192, *h256, *r256;
8135 	struct mgmt_pending_cmd *cmd = data;
8136 	struct sk_buff *skb = cmd->skb;
8137 	u8 status = mgmt_status(err);
8138 	u16 eir_len;
8139 
8140 	if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
8141 		return;
8142 
8143 	if (!status) {
8144 		if (!skb)
8145 			status = MGMT_STATUS_FAILED;
8146 		else if (IS_ERR(skb))
8147 			status = mgmt_status(PTR_ERR(skb));
8148 		else
8149 			status = mgmt_status(skb->data[0]);
8150 	}
8151 
8152 	bt_dev_dbg(hdev, "status %u", status);
8153 
8154 	mgmt_cp = cmd->param;
8155 
8156 	if (status) {
8157 		status = mgmt_status(status);
8158 		eir_len = 0;
8159 
8160 		h192 = NULL;
8161 		r192 = NULL;
8162 		h256 = NULL;
8163 		r256 = NULL;
8164 	} else if (!bredr_sc_enabled(hdev)) {
8165 		struct hci_rp_read_local_oob_data *rp;
8166 
8167 		if (skb->len != sizeof(*rp)) {
8168 			status = MGMT_STATUS_FAILED;
8169 			eir_len = 0;
8170 		} else {
8171 			status = MGMT_STATUS_SUCCESS;
8172 			rp = (void *)skb->data;
8173 
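			/* 5 = Class of Device field (2 byte header + 3 data),
			 * each 18 = SSP hash/randomizer (2 byte header + 16)
			 */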
8174 			eir_len = 5 + 18 + 18;
8175 			h192 = rp->hash;
8176 			r192 = rp->rand;
8177 			h256 = NULL;
8178 			r256 = NULL;
8179 		}
8180 	} else {
8181 		struct hci_rp_read_local_oob_ext_data *rp;
8182 
8183 		if (skb->len != sizeof(*rp)) {
8184 			status = MGMT_STATUS_FAILED;
8185 			eir_len = 0;
8186 		} else {
8187 			status = MGMT_STATUS_SUCCESS;
8188 			rp = (void *)skb->data;
8189 
8190 			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8191 				eir_len = 5 + 18 + 18;
8192 				h192 = NULL;
8193 				r192 = NULL;
8194 			} else {
8195 				eir_len = 5 + 18 + 18 + 18 + 18;
8196 				h192 = rp->hash192;
8197 				r192 = rp->rand192;
8198 			}
8199 
8200 			h256 = rp->hash256;
8201 			r256 = rp->rand256;
8202 		}
8203 	}
8204 
8205 	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
8206 	if (!mgmt_rp)
8207 		goto done;
8208 
8209 	if (eir_len == 0)
8210 		goto send_rsp;
8211 
8212 	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
8213 				  hdev->dev_class, 3);
8214 
8215 	if (h192 && r192) {
8216 		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8217 					  EIR_SSP_HASH_C192, h192, 16);
8218 		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8219 					  EIR_SSP_RAND_R192, r192, 16);
8220 	}
8221 
8222 	if (h256 && r256) {
8223 		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8224 					  EIR_SSP_HASH_C256, h256, 16);
8225 		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8226 					  EIR_SSP_RAND_R256, r256, 16);
8227 	}
8228 
8229 send_rsp:
8230 	mgmt_rp->type = mgmt_cp->type;
8231 	mgmt_rp->eir_len = cpu_to_le16(eir_len);
8232 
8233 	err = mgmt_cmd_complete(cmd->sk, hdev->id,
8234 				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
8235 				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
8236 	if (err < 0 || status)
8237 		goto done;
8238 
8239 	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
8240 
8241 	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8242 				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
8243 				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
8244 done:
8245 	if (skb && !IS_ERR(skb))
8246 		kfree_skb(skb);
8247 
8248 	kfree(mgmt_rp);
8249 	mgmt_pending_remove(cmd);
8250 }
8251 
8252 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8253 				  struct mgmt_cp_read_local_oob_ext_data *cp)
8254 {
8255 	struct mgmt_pending_cmd *cmd;
8256 	int err;
8257 
8258 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8259 			       cp, sizeof(*cp));
8260 	if (!cmd)
8261 		return -ENOMEM;
8262 
8263 	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8264 				 read_local_oob_ext_data_complete);
8265 
8266 	if (err < 0) {
8267 		mgmt_pending_remove(cmd);
8268 		return err;
8269 	}
8270 
8271 	return 0;
8272 }
8273 
8274 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
8275 				   void *data, u16 data_len)
8276 {
8277 	struct mgmt_cp_read_local_oob_ext_data *cp = data;
8278 	struct mgmt_rp_read_local_oob_ext_data *rp;
8279 	size_t rp_len;
8280 	u16 eir_len;
8281 	u8 status, flags, role, addr[7], hash[16], rand[16];
8282 	int err;
8283 
8284 	bt_dev_dbg(hdev, "sock %p", sk);
8285 
8286 	if (hdev_is_powered(hdev)) {
8287 		switch (cp->type) {
8288 		case BIT(BDADDR_BREDR):
8289 			status = mgmt_bredr_support(hdev);
8290 			if (status)
8291 				eir_len = 0;
8292 			else
8293 				eir_len = 5;
8294 			break;
8295 		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8296 			status = mgmt_le_support(hdev);
8297 			if (status)
8298 				eir_len = 0;
8299 			else
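				/* 9 = LE address (2 + 7), 3 = role,
				 * 18 + 18 = SC confirm + random, 3 = flags
				 */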
8300 				eir_len = 9 + 3 + 18 + 18 + 3;
8301 			break;
8302 		default:
8303 			status = MGMT_STATUS_INVALID_PARAMS;
8304 			eir_len = 0;
8305 			break;
8306 		}
8307 	} else {
8308 		status = MGMT_STATUS_NOT_POWERED;
8309 		eir_len = 0;
8310 	}
8311 
8312 	rp_len = sizeof(*rp) + eir_len;
8313 	rp = kmalloc(rp_len, GFP_ATOMIC);
8314 	if (!rp)
8315 		return -ENOMEM;
8316 
8317 	if (!status && !lmp_ssp_capable(hdev)) {
8318 		status = MGMT_STATUS_NOT_SUPPORTED;
8319 		eir_len = 0;
8320 	}
8321 
8322 	if (status)
8323 		goto complete;
8324 
8325 	hci_dev_lock(hdev);
8326 
8327 	eir_len = 0;
8328 	switch (cp->type) {
8329 	case BIT(BDADDR_BREDR):
8330 		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8331 			err = read_local_ssp_oob_req(hdev, sk, cp);
8332 			hci_dev_unlock(hdev);
8333 			if (!err)
8334 				goto done;
8335 
8336 			status = MGMT_STATUS_FAILED;
8337 			goto complete;
8338 		} else {
8339 			eir_len = eir_append_data(rp->eir, eir_len,
8340 						  EIR_CLASS_OF_DEV,
8341 						  hdev->dev_class, 3);
8342 		}
8343 		break;
8344 	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8345 		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
8346 		    smp_generate_oob(hdev, hash, rand) < 0) {
8347 			hci_dev_unlock(hdev);
8348 			status = MGMT_STATUS_FAILED;
8349 			goto complete;
8350 		}
8351 
8352 		/* This should return the active RPA, but since the RPA
8353 		 * is only programmed on demand, it is really hard to fill
8354 		 * this in at the moment. For now disallow retrieving
8355 		 * local out-of-band data when privacy is in use.
8356 		 *
8357 		 * Returning the identity address will not help here since
8358 		 * pairing happens before the identity resolving key is
8359 		 * known and thus the connection establishment happens
8360 		 * based on the RPA and not the identity address.
8361 		 */
8362 		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
8363 			hci_dev_unlock(hdev);
8364 			status = MGMT_STATUS_REJECTED;
8365 			goto complete;
8366 		}
8367 
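		/* Use the static random address when it is forced, when no
		 * public address is available, or when BR/EDR is disabled
		 * and a static address is set; addr[6] carries the address
		 * type (0x00 public, 0x01 random).
		 */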
8368 		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
8369 		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
8370 		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
8371 		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
8372 			memcpy(addr, &hdev->static_addr, 6);
8373 			addr[6] = 0x01;
8374 		} else {
8375 			memcpy(addr, &hdev->bdaddr, 6);
8376 			addr[6] = 0x00;
8377 		}
8378 
8379 		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
8380 					  addr, sizeof(addr));
8381 
8382 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8383 			role = 0x02;
8384 		else
8385 			role = 0x01;
8386 
8387 		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
8388 					  &role, sizeof(role));
8389 
8390 		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
8391 			eir_len = eir_append_data(rp->eir, eir_len,
8392 						  EIR_LE_SC_CONFIRM,
8393 						  hash, sizeof(hash));
8394 
8395 			eir_len = eir_append_data(rp->eir, eir_len,
8396 						  EIR_LE_SC_RANDOM,
8397 						  rand, sizeof(rand));
8398 		}
8399 
8400 		flags = mgmt_get_adv_discov_flags(hdev);
8401 
8402 		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
8403 			flags |= LE_AD_NO_BREDR;
8404 
8405 		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
8406 					  &flags, sizeof(flags));
8407 		break;
8408 	}
8409 
8410 	hci_dev_unlock(hdev);
8411 
8412 	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
8413 
8414 	status = MGMT_STATUS_SUCCESS;
8415 
8416 complete:
8417 	rp->type = cp->type;
8418 	rp->eir_len = cpu_to_le16(eir_len);
8419 
8420 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
8421 				status, rp, sizeof(*rp) + eir_len);
8422 	if (err < 0 || status)
8423 		goto done;
8424 
8425 	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8426 				 rp, sizeof(*rp) + eir_len,
8427 				 HCI_MGMT_OOB_DATA_EVENTS, sk);
8428 
8429 done:
8430 	kfree(rp);
8431 
8432 	return err;
8433 }
8434 
8435 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8436 {
8437 	u32 flags = 0;
8438 
8439 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
8440 	flags |= MGMT_ADV_FLAG_DISCOV;
8441 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8442 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8443 	flags |= MGMT_ADV_FLAG_APPEARANCE;
8444 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8445 	flags |= MGMT_ADV_PARAM_DURATION;
8446 	flags |= MGMT_ADV_PARAM_TIMEOUT;
8447 	flags |= MGMT_ADV_PARAM_INTERVALS;
8448 	flags |= MGMT_ADV_PARAM_TX_POWER;
8449 	flags |= MGMT_ADV_PARAM_SCAN_RSP;
8450 
8451 	/* With extended advertising, the TX_POWER returned from Set Adv
8452 	 * Param will always be valid.
8453 	 */
8454 	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8455 		flags |= MGMT_ADV_FLAG_TX_POWER;
8456 
8457 	if (ext_adv_capable(hdev)) {
8458 		flags |= MGMT_ADV_FLAG_SEC_1M;
8459 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8460 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8461 
8462 		if (le_2m_capable(hdev))
8463 			flags |= MGMT_ADV_FLAG_SEC_2M;
8464 
8465 		if (le_coded_capable(hdev))
8466 			flags |= MGMT_ADV_FLAG_SEC_CODED;
8467 	}
8468 
8469 	return flags;
8470 }
8471 
8472 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
8473 			     void *data, u16 data_len)
8474 {
8475 	struct mgmt_rp_read_adv_features *rp;
8476 	size_t rp_len;
8477 	int err;
8478 	struct adv_info *adv_instance;
8479 	u32 supported_flags;
8480 	u8 *instance;
8481 
8482 	bt_dev_dbg(hdev, "sock %p", sk);
8483 
8484 	if (!lmp_le_capable(hdev))
8485 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8486 				       MGMT_STATUS_REJECTED);
8487 
8488 	hci_dev_lock(hdev);
8489 
8490 	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
8491 	rp = kmalloc(rp_len, GFP_ATOMIC);
8492 	if (!rp) {
8493 		hci_dev_unlock(hdev);
8494 		return -ENOMEM;
8495 	}
8496 
8497 	supported_flags = get_supported_adv_flags(hdev);
8498 
8499 	rp->supported_flags = cpu_to_le32(supported_flags);
8500 	rp->max_adv_data_len = max_adv_len(hdev);
8501 	rp->max_scan_rsp_len = max_adv_len(hdev);
8502 	rp->max_instances = hdev->le_num_of_adv_sets;
8503 	rp->num_instances = hdev->adv_instance_cnt;
8504 
8505 	instance = rp->instance;
8506 	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
8507 		/* Only instances 1-le_num_of_adv_sets are externally visible */
8508 		if (adv_instance->instance <= hdev->adv_instance_cnt) {
8509 			*instance = adv_instance->instance;
8510 			instance++;
8511 		} else {
8512 			rp->num_instances--;
8513 			rp_len--;
8514 		}
8515 	}
8516 
8517 	hci_dev_unlock(hdev);
8518 
8519 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8520 				MGMT_STATUS_SUCCESS, rp, rp_len);
8521 
8522 	kfree(rp);
8523 
8524 	return err;
8525 }
8526 
8527 static u8 calculate_name_len(struct hci_dev *hdev)
8528 {
8529 	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
8530 
8531 	return eir_append_local_name(hdev, buf, 0);
8532 }
8533 
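/* Space left for user-supplied TLV data once kernel-managed fields are
 * reserved: the Flags and TX power fields take 3 bytes each, Appearance
 * takes 4 and the local name takes calculate_name_len() bytes.
 */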
8534 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8535 			   bool is_adv_data)
8536 {
8537 	u8 max_len = max_adv_len(hdev);
8538 
8539 	if (is_adv_data) {
8540 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8541 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
8542 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
8543 			max_len -= 3;
8544 
8545 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8546 			max_len -= 3;
8547 	} else {
8548 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8549 			max_len -= calculate_name_len(hdev);
8550 
8551 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8552 			max_len -= 4;
8553 	}
8554 
8555 	return max_len;
8556 }
8557 
8558 static bool flags_managed(u32 adv_flags)
8559 {
8560 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8561 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
8562 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
8563 }
8564 
8565 static bool tx_power_managed(u32 adv_flags)
8566 {
8567 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8568 }
8569 
8570 static bool name_managed(u32 adv_flags)
8571 {
8572 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8573 }
8574 
8575 static bool appearance_managed(u32 adv_flags)
8576 {
8577 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8578 }
8579 
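/* Walk the length-prefixed EIR structures and reject data that overruns
 * the buffer or contains fields (Flags, TX power, name, appearance)
 * that the kernel manages itself for the given adv_flags.
 */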
8580 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8581 			      u8 len, bool is_adv_data)
8582 {
8583 	int i, cur_len;
8584 	u8 max_len;
8585 
8586 	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8587 
8588 	if (len > max_len)
8589 		return false;
8590 
8591 	/* Make sure that the data is correctly formatted. */
8592 	for (i = 0; i < len; i += (cur_len + 1)) {
8593 		cur_len = data[i];
8594 
8595 		if (!cur_len)
8596 			continue;
8597 
8598 		if (data[i + 1] == EIR_FLAGS &&
8599 		    (!is_adv_data || flags_managed(adv_flags)))
8600 			return false;
8601 
8602 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8603 			return false;
8604 
8605 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8606 			return false;
8607 
8608 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8609 			return false;
8610 
8611 		if (data[i + 1] == EIR_APPEARANCE &&
8612 		    appearance_managed(adv_flags))
8613 			return false;
8614 
8615 		/* If the current field length would exceed the total data
8616 		 * length, then it's invalid.
8617 		 */
8618 		if (i + cur_len >= len)
8619 			return false;
8620 	}
8621 
8622 	return true;
8623 }
8624 
8625 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8626 {
8627 	u32 supported_flags, phy_flags;
8628 
8629 	/* The current implementation only supports a subset of the specified
8630 	 * flags. Also check that the secondary PHY flags are mutually exclusive.
8631 	 */
8632 	supported_flags = get_supported_adv_flags(hdev);
8633 	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
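	/* phy_flags & -phy_flags isolates the lowest set bit, so the XOR
	 * below is non-zero iff more than one secondary PHY flag is set.
	 */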
8634 	if (adv_flags & ~supported_flags ||
8635 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8636 		return false;
8637 
8638 	return true;
8639 }
8640 
8641 static bool adv_busy(struct hci_dev *hdev)
8642 {
8643 	return pending_find(MGMT_OP_SET_LE, hdev);
8644 }
8645 
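/* Common cleanup after an Add Advertising attempt: on success clear the
 * pending flag on the instances, on failure remove every still-pending
 * instance and signal its removal to userspace.
 */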
8646 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8647 			     int err)
8648 {
8649 	struct adv_info *adv, *n;
8650 
8651 	bt_dev_dbg(hdev, "err %d", err);
8652 
8653 	hci_dev_lock(hdev);
8654 
8655 	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8656 		u8 instance;
8657 
8658 		if (!adv->pending)
8659 			continue;
8660 
8661 		if (!err) {
8662 			adv->pending = false;
8663 			continue;
8664 		}
8665 
8666 		instance = adv->instance;
8667 
8668 		if (hdev->cur_adv_instance == instance)
8669 			cancel_adv_timeout(hdev);
8670 
8671 		hci_remove_adv_instance(hdev, instance);
8672 		mgmt_advertising_removed(sk, hdev, instance);
8673 	}
8674 
8675 	hci_dev_unlock(hdev);
8676 }
8677 
8678 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8679 {
8680 	struct mgmt_pending_cmd *cmd = data;
8681 	struct mgmt_cp_add_advertising *cp = cmd->param;
8682 	struct mgmt_rp_add_advertising rp;
8683 
8684 	memset(&rp, 0, sizeof(rp));
8685 
8686 	rp.instance = cp->instance;
8687 
8688 	if (err)
8689 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8690 				mgmt_status(err));
8691 	else
8692 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8693 				  mgmt_status(err), &rp, sizeof(rp));
8694 
8695 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8696 
8697 	mgmt_pending_free(cmd);
8698 }
8699 
8700 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8701 {
8702 	struct mgmt_pending_cmd *cmd = data;
8703 	struct mgmt_cp_add_advertising *cp = cmd->param;
8704 
8705 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8706 }
8707 
8708 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8709 			   void *data, u16 data_len)
8710 {
8711 	struct mgmt_cp_add_advertising *cp = data;
8712 	struct mgmt_rp_add_advertising rp;
8713 	u32 flags;
8714 	u8 status;
8715 	u16 timeout, duration;
8716 	unsigned int prev_instance_cnt;
8717 	u8 schedule_instance = 0;
8718 	struct adv_info *adv, *next_instance;
8719 	int err;
8720 	struct mgmt_pending_cmd *cmd;
8721 
8722 	bt_dev_dbg(hdev, "sock %p", sk);
8723 
8724 	status = mgmt_le_support(hdev);
8725 	if (status)
8726 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8727 				       status);
8728 
8729 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8730 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8731 				       MGMT_STATUS_INVALID_PARAMS);
8732 
8733 	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8734 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8735 				       MGMT_STATUS_INVALID_PARAMS);
8736 
8737 	flags = __le32_to_cpu(cp->flags);
8738 	timeout = __le16_to_cpu(cp->timeout);
8739 	duration = __le16_to_cpu(cp->duration);
8740 
8741 	if (!requested_adv_flags_are_valid(hdev, flags))
8742 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8743 				       MGMT_STATUS_INVALID_PARAMS);
8744 
8745 	hci_dev_lock(hdev);
8746 
8747 	if (timeout && !hdev_is_powered(hdev)) {
8748 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8749 				      MGMT_STATUS_REJECTED);
8750 		goto unlock;
8751 	}
8752 
8753 	if (adv_busy(hdev)) {
8754 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8755 				      MGMT_STATUS_BUSY);
8756 		goto unlock;
8757 	}
8758 
8759 	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8760 	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8761 			       cp->scan_rsp_len, false)) {
8762 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8763 				      MGMT_STATUS_INVALID_PARAMS);
8764 		goto unlock;
8765 	}
8766 
8767 	prev_instance_cnt = hdev->adv_instance_cnt;
8768 
8769 	adv = hci_add_adv_instance(hdev, cp->instance, flags,
8770 				   cp->adv_data_len, cp->data,
8771 				   cp->scan_rsp_len,
8772 				   cp->data + cp->adv_data_len,
8773 				   timeout, duration,
8774 				   HCI_ADV_TX_POWER_NO_PREFERENCE,
8775 				   hdev->le_adv_min_interval,
8776 				   hdev->le_adv_max_interval, 0);
8777 	if (IS_ERR(adv)) {
8778 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8779 				      MGMT_STATUS_FAILED);
8780 		goto unlock;
8781 	}
8782 
8783 	/* Only trigger an advertising added event if a new instance was
8784 	 * actually added.
8785 	 */
8786 	if (hdev->adv_instance_cnt > prev_instance_cnt)
8787 		mgmt_advertising_added(sk, hdev, cp->instance);
8788 
8789 	if (hdev->cur_adv_instance == cp->instance) {
8790 		/* If the currently advertised instance is being changed then
8791 		 * cancel the current advertising and schedule the next
8792 		 * instance. If there is only one instance then the overridden
8793 		 * advertising data will be visible right away.
8794 		 */
8795 		cancel_adv_timeout(hdev);
8796 
8797 		next_instance = hci_get_next_instance(hdev, cp->instance);
8798 		if (next_instance)
8799 			schedule_instance = next_instance->instance;
8800 	} else if (!hdev->adv_instance_timeout) {
8801 		/* Immediately advertise the new instance if no other
8802 		 * instance is currently being advertised.
8803 		 */
8804 		schedule_instance = cp->instance;
8805 	}
8806 
8807 	/* If the HCI_ADVERTISING flag is set, the device isn't powered or
8808 	 * there is no instance to be advertised, then we have no HCI
8809 	 * communication to make. Simply return.
8810 	 */
8811 	if (!hdev_is_powered(hdev) ||
8812 	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8813 	    !schedule_instance) {
8814 		rp.instance = cp->instance;
8815 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8816 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8817 		goto unlock;
8818 	}
8819 
8820 	/* We're good to go, update advertising data, parameters, and start
8821 	 * advertising.
8822 	 */
8823 	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
8824 			       data_len);
8825 	if (!cmd) {
8826 		err = -ENOMEM;
8827 		goto unlock;
8828 	}
8829 
8830 	cp->instance = schedule_instance;
8831 
8832 	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
8833 				 add_advertising_complete);
8834 	if (err < 0)
8835 		mgmt_pending_free(cmd);
8836 
8837 unlock:
8838 	hci_dev_unlock(hdev);
8839 
8840 	return err;
8841 }
8842 
8843 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8844 					int err)
8845 {
8846 	struct mgmt_pending_cmd *cmd = data;
8847 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8848 	struct mgmt_rp_add_ext_adv_params rp;
8849 	struct adv_info *adv;
8850 	u32 flags;
8851 
8852 	BT_DBG("%s", hdev->name);
8853 
8854 	hci_dev_lock(hdev);
8855 
8856 	adv = hci_find_adv_instance(hdev, cp->instance);
8857 	if (!adv)
8858 		goto unlock;
8859 
8860 	rp.instance = cp->instance;
8861 	rp.tx_power = adv->tx_power;
8862 
8863 	/* While we're at it, inform userspace of the available space for this
8864 	 * advertisement, given the flags that will be used.
8865 	 */
8866 	flags = __le32_to_cpu(cp->flags);
8867 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8868 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8869 
8870 	if (err) {
8871 		/* If this advertisement was previously advertising and we
8872 		 * failed to update it, we signal that it has been removed and
8873 		 * delete its structure.
8874 		 */
8875 		if (!adv->pending)
8876 			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8877 
8878 		hci_remove_adv_instance(hdev, cp->instance);
8879 
8880 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8881 				mgmt_status(err));
8882 	} else {
8883 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8884 				  mgmt_status(err), &rp, sizeof(rp));
8885 	}
8886 
8887 unlock:
8888 	mgmt_pending_free(cmd);
8889 
8890 	hci_dev_unlock(hdev);
8891 }
8892 
8893 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8894 {
8895 	struct mgmt_pending_cmd *cmd = data;
8896 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8897 
8898 	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8899 }
8900 
8901 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8902 			      void *data, u16 data_len)
8903 {
8904 	struct mgmt_cp_add_ext_adv_params *cp = data;
8905 	struct mgmt_rp_add_ext_adv_params rp;
8906 	struct mgmt_pending_cmd *cmd = NULL;
8907 	struct adv_info *adv;
8908 	u32 flags, min_interval, max_interval;
8909 	u16 timeout, duration;
8910 	u8 status;
8911 	s8 tx_power;
8912 	int err;
8913 
8914 	BT_DBG("%s", hdev->name);
8915 
8916 	status = mgmt_le_support(hdev);
8917 	if (status)
8918 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8919 				       status);
8920 
8921 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8922 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8923 				       MGMT_STATUS_INVALID_PARAMS);
8924 
8925 	/* The purpose of breaking add_advertising into two separate MGMT calls
8926 	 * for params and data is to allow more parameters to be added to this
8927 	 * structure in the future. For this reason, we verify that we have the
8928 	 * bare minimum structure we know of when the interface was defined. Any
8929 	 * extra parameters we don't know about will be ignored in this request.
8930 	 */
8931 	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8932 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8933 				       MGMT_STATUS_INVALID_PARAMS);
8934 
8935 	flags = __le32_to_cpu(cp->flags);
8936 
8937 	if (!requested_adv_flags_are_valid(hdev, flags))
8938 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8939 				       MGMT_STATUS_INVALID_PARAMS);
8940 
8941 	hci_dev_lock(hdev);
8942 
8943 	/* In the new interface, the controller must be powered to register */
8944 	if (!hdev_is_powered(hdev)) {
8945 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8946 				      MGMT_STATUS_REJECTED);
8947 		goto unlock;
8948 	}
8949 
8950 	if (adv_busy(hdev)) {
8951 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8952 				      MGMT_STATUS_BUSY);
8953 		goto unlock;
8954 	}
8955 
8956 	/* Parse the parameters defined in the request; use defaults otherwise */
8957 	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8958 		  __le16_to_cpu(cp->timeout) : 0;
8959 
8960 	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8961 		   __le16_to_cpu(cp->duration) :
8962 		   hdev->def_multi_adv_rotation_duration;
8963 
8964 	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8965 		       __le32_to_cpu(cp->min_interval) :
8966 		       hdev->le_adv_min_interval;
8967 
8968 	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8969 		       __le32_to_cpu(cp->max_interval) :
8970 		       hdev->le_adv_max_interval;
8971 
8972 	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8973 		   cp->tx_power :
8974 		   HCI_ADV_TX_POWER_NO_PREFERENCE;
8975 
8976 	/* Create advertising instance with no advertising or response data */
8977 	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
8978 				   timeout, duration, tx_power, min_interval,
8979 				   max_interval, 0);
8980 
8981 	if (IS_ERR(adv)) {
8982 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8983 				      MGMT_STATUS_FAILED);
8984 		goto unlock;
8985 	}
8986 
8987 	/* Submit request for advertising params if ext adv available */
8988 	if (ext_adv_capable(hdev)) {
8989 		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
8990 				       data, data_len);
8991 		if (!cmd) {
8992 			err = -ENOMEM;
8993 			hci_remove_adv_instance(hdev, cp->instance);
8994 			goto unlock;
8995 		}
8996 
8997 		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8998 					 add_ext_adv_params_complete);
8999 		if (err < 0)
9000 			mgmt_pending_free(cmd);
9001 	} else {
9002 		rp.instance = cp->instance;
9003 		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
9004 		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9005 		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9006 		err = mgmt_cmd_complete(sk, hdev->id,
9007 					MGMT_OP_ADD_EXT_ADV_PARAMS,
9008 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9009 	}
9010 
9011 unlock:
9012 	hci_dev_unlock(hdev);
9013 
9014 	return err;
9015 }
9016 
9017 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
9018 {
9019 	struct mgmt_pending_cmd *cmd = data;
9020 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
9021 	struct mgmt_rp_add_advertising rp;
9022 
9023 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
9024 
9025 	memset(&rp, 0, sizeof(rp));
9026 
9027 	rp.instance = cp->instance;
9028 
9029 	if (err)
9030 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9031 				mgmt_status(err));
9032 	else
9033 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9034 				  mgmt_status(err), &rp, sizeof(rp));
9035 
9036 	mgmt_pending_free(cmd);
9037 }
9038 
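/* With extended advertising, push the updated advertising and scan
 * response data and (re-)enable the set directly; otherwise fall back
 * to the legacy software rotation scheduler.
 */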
9039 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
9040 {
9041 	struct mgmt_pending_cmd *cmd = data;
9042 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
9043 	int err;
9044 
9045 	if (ext_adv_capable(hdev)) {
9046 		err = hci_update_adv_data_sync(hdev, cp->instance);
9047 		if (err)
9048 			return err;
9049 
9050 		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
9051 		if (err)
9052 			return err;
9053 
9054 		return hci_enable_ext_advertising_sync(hdev, cp->instance);
9055 	}
9056 
9057 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
9058 }
9059 
9060 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
9061 			    u16 data_len)
9062 {
9063 	struct mgmt_cp_add_ext_adv_data *cp = data;
9064 	struct mgmt_rp_add_ext_adv_data rp;
9065 	u8 schedule_instance = 0;
9066 	struct adv_info *next_instance;
9067 	struct adv_info *adv_instance;
9068 	int err = 0;
9069 	struct mgmt_pending_cmd *cmd;
9070 
9071 	BT_DBG("%s", hdev->name);
9072 
9073 	hci_dev_lock(hdev);
9074 
9075 	adv_instance = hci_find_adv_instance(hdev, cp->instance);
9076 
9077 	if (!adv_instance) {
9078 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9079 				      MGMT_STATUS_INVALID_PARAMS);
9080 		goto unlock;
9081 	}
9082 
9083 	/* In the new interface, the controller must be powered to register */
9084 	if (!hdev_is_powered(hdev)) {
9085 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9086 				      MGMT_STATUS_REJECTED);
9087 		goto clear_new_instance;
9088 	}
9089 
9090 	if (adv_busy(hdev)) {
9091 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9092 				      MGMT_STATUS_BUSY);
9093 		goto clear_new_instance;
9094 	}
9095 
9096 	/* Validate new data */
9097 	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
9098 			       cp->adv_data_len, true) ||
9099 	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
9100 			       cp->adv_data_len, cp->scan_rsp_len, false)) {
9101 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9102 				      MGMT_STATUS_INVALID_PARAMS);
9103 		goto clear_new_instance;
9104 	}
9105 
9106 	/* Set the data in the advertising instance */
9107 	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
9108 				  cp->data, cp->scan_rsp_len,
9109 				  cp->data + cp->adv_data_len);
9110 
9111 	/* If using software rotation, determine next instance to use */
9112 	if (hdev->cur_adv_instance == cp->instance) {
9113 		/* If the currently advertised instance is being changed
9114 		 * then cancel the current advertising and schedule the
9115 		 * next instance. If there is only one instance then the
9116 		 * overridden advertising data will be visible right
9117 		 * away.
9118 		 */
9119 		cancel_adv_timeout(hdev);
9120 
9121 		next_instance = hci_get_next_instance(hdev, cp->instance);
9122 		if (next_instance)
9123 			schedule_instance = next_instance->instance;
9124 	} else if (!hdev->adv_instance_timeout) {
9125 		/* Immediately advertise the new instance if no other
9126 		 * instance is currently being advertised.
9127 		 */
9128 		schedule_instance = cp->instance;
9129 	}
9130 
9131 	/* If the HCI_ADVERTISING flag is set or there is no instance to
9132 	 * be advertised then we have no HCI communication to make.
9133 	 * Simply return.
9134 	 */
9135 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
9136 		if (adv_instance->pending) {
9137 			mgmt_advertising_added(sk, hdev, cp->instance);
9138 			adv_instance->pending = false;
9139 		}
9140 		rp.instance = cp->instance;
9141 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9142 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9143 		goto unlock;
9144 	}
9145 
9146 	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
9147 			       data_len);
9148 	if (!cmd) {
9149 		err = -ENOMEM;
9150 		goto clear_new_instance;
9151 	}
9152 
9153 	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
9154 				 add_ext_adv_data_complete);
9155 	if (err < 0) {
9156 		mgmt_pending_free(cmd);
9157 		goto clear_new_instance;
9158 	}
9159 
9160 	/* We were successful in updating the data, so trigger the
9161 	 * advertising_added event if this instance wasn't previously
9162 	 * advertising. If a failure occurs in the requests we initiated,
9163 	 * we will remove the instance again in add_advertising_complete.
9164 	 */
9165 	if (adv_instance->pending)
9166 		mgmt_advertising_added(sk, hdev, cp->instance);
9167 
9168 	goto unlock;
9169 
9170 clear_new_instance:
9171 	hci_remove_adv_instance(hdev, cp->instance);
9172 
9173 unlock:
9174 	hci_dev_unlock(hdev);
9175 
9176 	return err;
9177 }
9178 
9179 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9180 					int err)
9181 {
9182 	struct mgmt_pending_cmd *cmd = data;
9183 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9184 	struct mgmt_rp_remove_advertising rp;
9185 
9186 	bt_dev_dbg(hdev, "err %d", err);
9187 
9188 	memset(&rp, 0, sizeof(rp));
9189 	rp.instance = cp->instance;
9190 
9191 	if (err)
9192 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9193 				mgmt_status(err));
9194 	else
9195 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9196 				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9197 
9198 	mgmt_pending_free(cmd);
9199 }
9200 
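/* Remove the requested instance (instance 0 means all of them) and
 * disable advertising entirely once no instances remain.
 */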
9201 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9202 {
9203 	struct mgmt_pending_cmd *cmd = data;
9204 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9205 	int err;
9206 
9207 	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9208 	if (err)
9209 		return err;
9210 
9211 	if (list_empty(&hdev->adv_instances))
9212 		err = hci_disable_advertising_sync(hdev);
9213 
9214 	return err;
9215 }
9216 
9217 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
9218 			      void *data, u16 data_len)
9219 {
9220 	struct mgmt_cp_remove_advertising *cp = data;
9221 	struct mgmt_pending_cmd *cmd;
9222 	int err;
9223 
9224 	bt_dev_dbg(hdev, "sock %p", sk);
9225 
9226 	hci_dev_lock(hdev);
9227 
9228 	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
9229 		err = mgmt_cmd_status(sk, hdev->id,
9230 				      MGMT_OP_REMOVE_ADVERTISING,
9231 				      MGMT_STATUS_INVALID_PARAMS);
9232 		goto unlock;
9233 	}
9234 
9235 	if (pending_find(MGMT_OP_SET_LE, hdev)) {
9236 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9237 				      MGMT_STATUS_BUSY);
9238 		goto unlock;
9239 	}
9240 
9241 	if (list_empty(&hdev->adv_instances)) {
9242 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9243 				      MGMT_STATUS_INVALID_PARAMS);
9244 		goto unlock;
9245 	}
9246 
9247 	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
9248 			       data_len);
9249 	if (!cmd) {
9250 		err = -ENOMEM;
9251 		goto unlock;
9252 	}
9253 
9254 	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
9255 				 remove_advertising_complete);
9256 	if (err < 0)
9257 		mgmt_pending_free(cmd);
9258 
9259 unlock:
9260 	hci_dev_unlock(hdev);
9261 
9262 	return err;
9263 }
9264 
9265 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9266 			     void *data, u16 data_len)
9267 {
9268 	struct mgmt_cp_get_adv_size_info *cp = data;
9269 	struct mgmt_rp_get_adv_size_info rp;
9270 	u32 flags, supported_flags;
9271 
9272 	bt_dev_dbg(hdev, "sock %p", sk);
9273 
9274 	if (!lmp_le_capable(hdev))
9275 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9276 				       MGMT_STATUS_REJECTED);
9277 
9278 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9279 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9280 				       MGMT_STATUS_INVALID_PARAMS);
9281 
9282 	flags = __le32_to_cpu(cp->flags);
9283 
9284 	/* The current implementation only supports a subset of the specified
9285 	 * flags.
9286 	 */
9287 	supported_flags = get_supported_adv_flags(hdev);
9288 	if (flags & ~supported_flags)
9289 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9290 				       MGMT_STATUS_INVALID_PARAMS);
9291 
9292 	rp.instance = cp->instance;
9293 	rp.flags = cp->flags;
9294 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9295 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9296 
9297 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9298 				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9299 }
9300 
9301 static const struct hci_mgmt_handler mgmt_handlers[] = {
9302 	{ NULL }, /* 0x0000 (no command) */
9303 	{ read_version,            MGMT_READ_VERSION_SIZE,
9304 						HCI_MGMT_NO_HDEV |
9305 						HCI_MGMT_UNTRUSTED },
9306 	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
9307 						HCI_MGMT_NO_HDEV |
9308 						HCI_MGMT_UNTRUSTED },
9309 	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
9310 						HCI_MGMT_NO_HDEV |
9311 						HCI_MGMT_UNTRUSTED },
9312 	{ read_controller_info,    MGMT_READ_INFO_SIZE,
9313 						HCI_MGMT_UNTRUSTED },
9314 	{ set_powered,             MGMT_SETTING_SIZE },
9315 	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
9316 	{ set_connectable,         MGMT_SETTING_SIZE },
9317 	{ set_fast_connectable,    MGMT_SETTING_SIZE },
9318 	{ set_bondable,            MGMT_SETTING_SIZE },
9319 	{ set_link_security,       MGMT_SETTING_SIZE },
9320 	{ set_ssp,                 MGMT_SETTING_SIZE },
9321 	{ set_hs,                  MGMT_SETTING_SIZE },
9322 	{ set_le,                  MGMT_SETTING_SIZE },
9323 	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
9324 	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
9325 	{ add_uuid,                MGMT_ADD_UUID_SIZE },
9326 	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
9327 	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
9328 						HCI_MGMT_VAR_LEN },
9329 	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
9330 						HCI_MGMT_VAR_LEN },
9331 	{ disconnect,              MGMT_DISCONNECT_SIZE },
9332 	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
9333 	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
9334 	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
9335 	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
9336 	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
9337 	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
9338 	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
9339 	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
9340 	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
9341 	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
9342 	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
9343 	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
9344 	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
9345 						HCI_MGMT_VAR_LEN },
9346 	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
9347 	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
9348 	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
9349 	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
9350 	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
9351 	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
9352 	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
9353 	{ set_advertising,         MGMT_SETTING_SIZE },
9354 	{ set_bredr,               MGMT_SETTING_SIZE },
9355 	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
9356 	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
9357 	{ set_secure_conn,         MGMT_SETTING_SIZE },
9358 	{ set_debug_keys,          MGMT_SETTING_SIZE },
9359 	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
9360 	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
9361 						HCI_MGMT_VAR_LEN },
9362 	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
9363 	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
9364 	{ add_device,              MGMT_ADD_DEVICE_SIZE },
9365 	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
9366 	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
9367 						HCI_MGMT_VAR_LEN },
9368 	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
9369 						HCI_MGMT_NO_HDEV |
9370 						HCI_MGMT_UNTRUSTED },
9371 	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
9372 						HCI_MGMT_UNCONFIGURED |
9373 						HCI_MGMT_UNTRUSTED },
9374 	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
9375 						HCI_MGMT_UNCONFIGURED },
9376 	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
9377 						HCI_MGMT_UNCONFIGURED },
9378 	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
9379 						HCI_MGMT_VAR_LEN },
9380 	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
9381 	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
9382 						HCI_MGMT_NO_HDEV |
9383 						HCI_MGMT_UNTRUSTED },
9384 	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
9385 	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
9386 						HCI_MGMT_VAR_LEN },
9387 	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
9388 	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
9389 	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
9390 	{ read_ext_controller_info, MGMT_READ_EXT_INFO_SIZE,
9391 						HCI_MGMT_UNTRUSTED },
9392 	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
9393 	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
9394 	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
9395 	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
9396 						HCI_MGMT_VAR_LEN },
9397 	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
9398 	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
9399 						HCI_MGMT_UNTRUSTED },
9400 	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
9401 						HCI_MGMT_UNTRUSTED |
9402 						HCI_MGMT_HDEV_OPTIONAL },
9403 	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
9404 						HCI_MGMT_VAR_LEN |
9405 						HCI_MGMT_HDEV_OPTIONAL },
9406 	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
9407 						HCI_MGMT_UNTRUSTED },
9408 	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
9409 						HCI_MGMT_VAR_LEN },
9410 	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
9411 						HCI_MGMT_UNTRUSTED },
9412 	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
9413 						HCI_MGMT_VAR_LEN },
9414 	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
9415 	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
9416 	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
9417 	{ add_adv_patterns_monitor, MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
9418 						HCI_MGMT_VAR_LEN },
9419 	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
9420 	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
9421 						HCI_MGMT_VAR_LEN },
9422 	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
9423 						HCI_MGMT_VAR_LEN },
9424 	{ add_adv_patterns_monitor_rssi,
9425 				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
9426 						HCI_MGMT_VAR_LEN },
9427 	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
9428 						HCI_MGMT_VAR_LEN },
9429 	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
9430 	{ mesh_send,               MGMT_MESH_SEND_SIZE,
9431 						HCI_MGMT_VAR_LEN },
9432 	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
9433 	{ mgmt_hci_cmd_sync,       MGMT_HCI_CMD_SYNC_SIZE, HCI_MGMT_VAR_LEN },
9434 };
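
/* Note: management commands are dispatched by using the opcode as a
 * direct index into this table (see hci_mgmt_cmd() in hci_sock.c), so
 * each entry's position must match its MGMT_OP_* value; the { NULL }
 * entry keeps opcode 0x0000 unused.
 */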
9435 
9436 void mgmt_index_added(struct hci_dev *hdev)
9437 {
9438 	struct mgmt_ev_ext_index ev;
9439 
9440 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9441 		return;
9442 
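	/* Extended Index event types per the management API: 0x00 for a
	 * configured (primary) controller, 0x01 for an unconfigured one.
	 */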
9443 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9444 		mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
9445 				 HCI_MGMT_UNCONF_INDEX_EVENTS);
9446 		ev.type = 0x01;
9447 	} else {
9448 		mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9449 				 HCI_MGMT_INDEX_EVENTS);
9450 		ev.type = 0x00;
9451 	}
9452 
9453 	ev.bus = hdev->bus;
9454 
9455 	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9456 			 HCI_MGMT_EXT_INDEX_EVENTS);
9457 }
9458 
9459 void mgmt_index_removed(struct hci_dev *hdev)
9460 {
9461 	struct mgmt_ev_ext_index ev;
9462 	struct cmd_lookup match = { NULL, hdev, MGMT_STATUS_INVALID_INDEX };
9463 
9464 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9465 		return;
9466 
9467 	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);
9468 
9469 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9470 		mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
9471 				 HCI_MGMT_UNCONF_INDEX_EVENTS);
9472 		ev.type = 0x01;
9473 	} else {
9474 		mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
9475 				 HCI_MGMT_INDEX_EVENTS);
9476 		ev.type = 0x00;
9477 	}
9478 
9479 	ev.bus = hdev->bus;
9480 
9481 	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
9482 			 HCI_MGMT_EXT_INDEX_EVENTS);
9483 
9484 	/* Cancel any remaining timed work */
9485 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
9486 		return;
9487 	cancel_delayed_work_sync(&hdev->discov_off);
9488 	cancel_delayed_work_sync(&hdev->service_cache);
9489 	cancel_delayed_work_sync(&hdev->rpa_expired);
9490 }
9491 
9492 void mgmt_power_on(struct hci_dev *hdev, int err)
9493 {
9494 	struct cmd_lookup match = { NULL, hdev };
9495 
9496 	bt_dev_dbg(hdev, "err %d", err);
9497 
9498 	hci_dev_lock(hdev);
9499 
9500 	if (!err) {
9501 		restart_le_actions(hdev);
9502 		hci_update_passive_scan(hdev);
9503 	}
9504 
9505 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9506 
9507 	new_settings(hdev, match.sk);
9508 
9509 	if (match.sk)
9510 		sock_put(match.sk);
9511 
9512 	hci_dev_unlock(hdev);
9513 }
9514 
9515 void __mgmt_power_off(struct hci_dev *hdev)
9516 {
9517 	struct cmd_lookup match = { NULL, hdev };
9518 	u8 zero_cod[] = { 0, 0, 0 };
9519 
9520 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9521 
9522 	/* If the power off is because of hdev unregistration, let us
9523 	 * use the appropriate INVALID_INDEX status. Otherwise use
9524 	 * NOT_POWERED. We cover both scenarios here since later in
9525 	 * mgmt_index_removed() any hci_conn callbacks will have already
9526 	 * been triggered, potentially causing misleading DISCONNECTED
9527 	 * status responses.
9528 	 */
9529 	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
9530 		match.mgmt_status = MGMT_STATUS_INVALID_INDEX;
9531 	else
9532 		match.mgmt_status = MGMT_STATUS_NOT_POWERED;
9533 
9534 	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);
9535 
9536 	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
9537 		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
9538 				   zero_cod, sizeof(zero_cod),
9539 				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9540 		ext_info_changed(hdev, NULL);
9541 	}
9542 
9543 	new_settings(hdev, match.sk);
9544 
9545 	if (match.sk)
9546 		sock_put(match.sk);
9547 }
9548 
9549 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9550 {
9551 	struct mgmt_pending_cmd *cmd;
9552 	u8 status;
9553 
9554 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9555 	if (!cmd)
9556 		return;
9557 
9558 	if (err == -ERFKILL)
9559 		status = MGMT_STATUS_RFKILLED;
9560 	else
9561 		status = MGMT_STATUS_FAILED;
9562 
9563 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9564 
9565 	mgmt_pending_remove(cmd);
9566 }
9567 
9568 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
9569 		       bool persistent)
9570 {
9571 	struct mgmt_ev_new_link_key ev;
9572 
9573 	memset(&ev, 0, sizeof(ev));
9574 
9575 	ev.store_hint = persistent;
9576 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9577 	ev.key.addr.type = BDADDR_BREDR;
9578 	ev.key.type = key->type;
9579 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
9580 	ev.key.pin_len = key->pin_len;
9581 
9582 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
9583 }
9584 
9585 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9586 {
9587 	switch (ltk->type) {
9588 	case SMP_LTK:
9589 	case SMP_LTK_RESPONDER:
9590 		if (ltk->authenticated)
9591 			return MGMT_LTK_AUTHENTICATED;
9592 		return MGMT_LTK_UNAUTHENTICATED;
9593 	case SMP_LTK_P256:
9594 		if (ltk->authenticated)
9595 			return MGMT_LTK_P256_AUTH;
9596 		return MGMT_LTK_P256_UNAUTH;
9597 	case SMP_LTK_P256_DEBUG:
9598 		return MGMT_LTK_P256_DEBUG;
9599 	}
9600 
9601 	return MGMT_LTK_UNAUTHENTICATED;
9602 }
9603 
9604 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
9605 {
9606 	struct mgmt_ev_new_long_term_key ev;
9607 
9608 	memset(&ev, 0, sizeof(ev));
9609 
9610 	/* Devices using resolvable or non-resolvable random addresses
9611 	 * without providing an identity resolving key don't need their
9612 	 * long term keys stored. Their addresses will change the
9613 	 * next time around.
9614 	 *
9615 	 * Only when a remote device provides an identity address do we
9616 	 * make sure the long term key is stored. If the remote
9617 	 * identity is known, the long term keys are internally
9618 	 * mapped to the identity address. So allow static random
9619 	 * and public addresses here.
9620 	 */
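	/* The two most significant bits of b[5] (bdaddr_t is stored
	 * little-endian, so b[5] is the most significant byte) encode the
	 * random address sub-type: 0b11 is static random (an identity
	 * address), 0b01 is a resolvable private address (RPA) and 0b00
	 * is a non-resolvable private address.
	 */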
9621 	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9622 	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
9623 		ev.store_hint = 0x00;
9624 	else
9625 		ev.store_hint = persistent;
9626 
9627 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9628 	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
9629 	ev.key.type = mgmt_ltk_type(key);
9630 	ev.key.enc_size = key->enc_size;
9631 	ev.key.ediv = key->ediv;
9632 	ev.key.rand = key->rand;
9633 
9634 	if (key->type == SMP_LTK)
9635 		ev.key.initiator = 1;
9636 
9637 	/* Make sure we copy only the significant bytes based on the
9638 	 * encryption key size, and set the rest of the value to zeroes.
9639 	 */
9640 	memcpy(ev.key.val, key->val, key->enc_size);
9641 	memset(ev.key.val + key->enc_size, 0,
9642 	       sizeof(ev.key.val) - key->enc_size);
9643 
9644 	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
9645 }
9646 
9647 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9648 {
9649 	struct mgmt_ev_new_irk ev;
9650 
9651 	memset(&ev, 0, sizeof(ev));
9652 
9653 	ev.store_hint = persistent;
9654 
9655 	bacpy(&ev.rpa, &irk->rpa);
9656 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9657 	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
9658 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9659 
9660 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9661 }
9662 
9663 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9664 		   bool persistent)
9665 {
9666 	struct mgmt_ev_new_csrk ev;
9667 
9668 	memset(&ev, 0, sizeof(ev));
9669 
9670 	/* Devices using resolvable or non-resolvable random addresses
9671 	 * without providing an identity resolving key don't need their
9672 	 * signature resolving keys stored. Their addresses will change
9673 	 * the next time around.
9674 	 *
9675 	 * Only when a remote device provides an identity address do we
9676 	 * make sure the signature resolving key is stored. So allow
9677 	 * static random and public addresses here.
9678 	 */
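	/* Same static random address check as in mgmt_new_ltk() above. */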
9679 	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9680 	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9681 		ev.store_hint = 0x00;
9682 	else
9683 		ev.store_hint = persistent;
9684 
9685 	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9686 	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
9687 	ev.key.type = csrk->type;
9688 	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9689 
9690 	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
9691 }
9692 
9693 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9694 			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9695 			 u16 max_interval, u16 latency, u16 timeout)
9696 {
9697 	struct mgmt_ev_new_conn_param ev;
9698 
9699 	if (!hci_is_identity_address(bdaddr, bdaddr_type))
9700 		return;
9701 
9702 	memset(&ev, 0, sizeof(ev));
9703 	bacpy(&ev.addr.bdaddr, bdaddr);
9704 	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9705 	ev.store_hint = store_hint;
9706 	ev.min_interval = cpu_to_le16(min_interval);
9707 	ev.max_interval = cpu_to_le16(max_interval);
9708 	ev.latency = cpu_to_le16(latency);
9709 	ev.timeout = cpu_to_le16(timeout);
9710 
9711 	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9712 }
9713 
9714 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9715 			   u8 *name, u8 name_len)
9716 {
9717 	struct sk_buff *skb;
9718 	struct mgmt_ev_device_connected *ev;
9719 	u16 eir_len = 0;
9720 	u32 flags = 0;
9721 
9722 	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
9723 		return;
9724 
9725 	/* Allocate a buffer for the LE advertising data or BR/EDR EIR */
9726 	if (conn->le_adv_data_len > 0)
9727 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9728 				     sizeof(*ev) + conn->le_adv_data_len);
9729 	else
9730 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9731 				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
9732 				     eir_precalc_len(sizeof(conn->dev_class)));
9733 
	/* Bail out if the event skb could not be allocated */
	if (!skb)
		return;

9734 	ev = skb_put(skb, sizeof(*ev));
9735 	bacpy(&ev->addr.bdaddr, &conn->dst);
9736 	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9737 
9738 	if (conn->out)
9739 		flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9740 
9741 	ev->flags = __cpu_to_le32(flags);
9742 
9743 	/* We must ensure that the EIR Data fields are ordered and
9744 	 * unique. Keep it simple for now and avoid the problem by not
9745 	 * adding any BR/EDR data to the LE adv.
9746 	 */
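	/* Each EIR field below is length-prefixed: [len][type][data...],
	 * where len covers the type byte plus the data, e.g. type
	 * EIR_NAME_COMPLETE (0x09) or EIR_CLASS_OF_DEV (0x0d).
	 */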
9747 	if (conn->le_adv_data_len > 0) {
9748 		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9749 		eir_len = conn->le_adv_data_len;
9750 	} else {
9751 		if (name)
9752 			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9753 
9754 		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
9755 			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
9756 						    conn->dev_class, sizeof(conn->dev_class));
9757 	}
9758 
9759 	ev->eir_len = cpu_to_le16(eir_len);
9760 
9761 	mgmt_event_skb(skb, NULL);
9762 }
9763 
9764 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9765 {
9766 	struct hci_dev *hdev = data;
9767 	struct mgmt_cp_unpair_device *cp = cmd->param;
9768 
9769 	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9770 
9771 	cmd->cmd_complete(cmd, 0);
9772 	mgmt_pending_remove(cmd);
9773 }
9774 
9775 bool mgmt_powering_down(struct hci_dev *hdev)
9776 {
9777 	struct mgmt_pending_cmd *cmd;
9778 	struct mgmt_mode *cp;
9779 
9780 	if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
9781 		return true;
9782 
9783 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9784 	if (!cmd)
9785 		return false;
9786 
9787 	cp = cmd->param;
9788 	if (!cp->val)
9789 		return true;
9790 
9791 	return false;
9792 }
9793 
9794 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
9795 			      u8 link_type, u8 addr_type, u8 reason,
9796 			      bool mgmt_connected)
9797 {
9798 	struct mgmt_ev_device_disconnected ev;
9799 	struct sock *sk = NULL;
9800 
9801 	if (!mgmt_connected)
9802 		return;
9803 
9804 	if (link_type != ACL_LINK && link_type != LE_LINK)
9805 		return;
9806 
9807 	bacpy(&ev.addr.bdaddr, bdaddr);
9808 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9809 	ev.reason = reason;
9810 
9811 	/* Report disconnects due to suspend */
9812 	if (hdev->suspended)
9813 		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
9814 
9815 	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
9816 
9817 	if (sk)
9818 		sock_put(sk);
9819 }
9820 
9821 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9822 			    u8 link_type, u8 addr_type, u8 status)
9823 {
9824 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9825 	struct mgmt_cp_disconnect *cp;
9826 	struct mgmt_pending_cmd *cmd;
9827 
9828 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9829 			     hdev);
9830 
9831 	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9832 	if (!cmd)
9833 		return;
9834 
9835 	cp = cmd->param;
9836 
9837 	if (bacmp(bdaddr, &cp->addr.bdaddr))
9838 		return;
9839 
9840 	if (cp->addr.type != bdaddr_type)
9841 		return;
9842 
9843 	cmd->cmd_complete(cmd, mgmt_status(status));
9844 	mgmt_pending_remove(cmd);
9845 }
9846 
9847 void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status)
9848 {
9849 	struct mgmt_ev_connect_failed ev;
9850 
9851 	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
9852 		mgmt_device_disconnected(hdev, &conn->dst, conn->type,
9853 					 conn->dst_type, status, true);
9854 		return;
9855 	}
9856 
9857 	bacpy(&ev.addr.bdaddr, &conn->dst);
9858 	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9859 	ev.status = mgmt_status(status);
9860 
9861 	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
9862 }
9863 
9864 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9865 {
9866 	struct mgmt_ev_pin_code_request ev;
9867 
9868 	bacpy(&ev.addr.bdaddr, bdaddr);
9869 	ev.addr.type = BDADDR_BREDR;
9870 	ev.secure = secure;
9871 
9872 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9873 }
9874 
9875 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9876 				  u8 status)
9877 {
9878 	struct mgmt_pending_cmd *cmd;
9879 
9880 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9881 	if (!cmd)
9882 		return;
9883 
9884 	cmd->cmd_complete(cmd, mgmt_status(status));
9885 	mgmt_pending_remove(cmd);
9886 }
9887 
9888 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9889 				      u8 status)
9890 {
9891 	struct mgmt_pending_cmd *cmd;
9892 
9893 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9894 	if (!cmd)
9895 		return;
9896 
9897 	cmd->cmd_complete(cmd, mgmt_status(status));
9898 	mgmt_pending_remove(cmd);
9899 }
9900 
9901 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9902 			      u8 link_type, u8 addr_type, u32 value,
9903 			      u8 confirm_hint)
9904 {
9905 	struct mgmt_ev_user_confirm_request ev;
9906 
9907 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9908 
9909 	bacpy(&ev.addr.bdaddr, bdaddr);
9910 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9911 	ev.confirm_hint = confirm_hint;
9912 	ev.value = cpu_to_le32(value);
9913 
9914 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9915 			  NULL);
9916 }
9917 
9918 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9919 			      u8 link_type, u8 addr_type)
9920 {
9921 	struct mgmt_ev_user_passkey_request ev;
9922 
9923 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9924 
9925 	bacpy(&ev.addr.bdaddr, bdaddr);
9926 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9927 
9928 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9929 			  NULL);
9930 }
9931 
9932 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9933 				      u8 link_type, u8 addr_type, u8 status,
9934 				      u8 opcode)
9935 {
9936 	struct mgmt_pending_cmd *cmd;
9937 
9938 	cmd = pending_find(opcode, hdev);
9939 	if (!cmd)
9940 		return -ENOENT;
9941 
9942 	cmd->cmd_complete(cmd, mgmt_status(status));
9943 	mgmt_pending_remove(cmd);
9944 
9945 	return 0;
9946 }
9947 
9948 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9949 				     u8 link_type, u8 addr_type, u8 status)
9950 {
9951 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9952 					  status, MGMT_OP_USER_CONFIRM_REPLY);
9953 }
9954 
9955 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9956 					 u8 link_type, u8 addr_type, u8 status)
9957 {
9958 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9959 					  status,
9960 					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
9961 }
9962 
9963 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9964 				     u8 link_type, u8 addr_type, u8 status)
9965 {
9966 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9967 					  status, MGMT_OP_USER_PASSKEY_REPLY);
9968 }
9969 
9970 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9971 					 u8 link_type, u8 addr_type, u8 status)
9972 {
9973 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9974 					  status,
9975 					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
9976 }
9977 
9978 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9979 			     u8 link_type, u8 addr_type, u32 passkey,
9980 			     u8 entered)
9981 {
9982 	struct mgmt_ev_passkey_notify ev;
9983 
9984 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9985 
9986 	bacpy(&ev.addr.bdaddr, bdaddr);
9987 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9988 	ev.passkey = __cpu_to_le32(passkey);
9989 	ev.entered = entered;
9990 
9991 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9992 }
9993 
9994 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
9995 {
9996 	struct mgmt_ev_auth_failed ev;
9997 	struct mgmt_pending_cmd *cmd;
9998 	u8 status = mgmt_status(hci_status);
9999 
10000 	bacpy(&ev.addr.bdaddr, &conn->dst);
10001 	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
10002 	ev.status = status;
10003 
10004 	cmd = find_pairing(conn);
10005 
10006 	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
10007 		    cmd ? cmd->sk : NULL);
10008 
10009 	if (cmd) {
10010 		cmd->cmd_complete(cmd, status);
10011 		mgmt_pending_remove(cmd);
10012 	}
10013 }
10014 
10015 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
10016 {
10017 	struct cmd_lookup match = { NULL, hdev };
10018 	bool changed;
10019 
10020 	if (status) {
10021 		u8 mgmt_err = mgmt_status(status);
10022 		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
10023 				     cmd_status_rsp, &mgmt_err);
10024 		return;
10025 	}
10026 
10027 	if (test_bit(HCI_AUTH, &hdev->flags))
10028 		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
10029 	else
10030 		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
10031 
10032 	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
10033 			     &match);
10034 
10035 	if (changed)
10036 		new_settings(hdev, match.sk);
10037 
10038 	if (match.sk)
10039 		sock_put(match.sk);
10040 }
10041 
10042 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
10043 {
10044 	struct cmd_lookup *match = data;
10045 
10046 	if (match->sk == NULL) {
10047 		match->sk = cmd->sk;
10048 		sock_hold(match->sk);
10049 	}
10050 }
10051 
10052 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
10053 				    u8 status)
10054 {
10055 	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
10056 
10057 	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
10058 	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
10059 	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
10060 
10061 	if (!status) {
10062 		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
10063 				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
10064 		ext_info_changed(hdev, NULL);
10065 	}
10066 
10067 	if (match.sk)
10068 		sock_put(match.sk);
10069 }
10070 
10071 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
10072 {
10073 	struct mgmt_cp_set_local_name ev;
10074 	struct mgmt_pending_cmd *cmd;
10075 
10076 	if (status)
10077 		return;
10078 
10079 	memset(&ev, 0, sizeof(ev));
10080 	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
10081 	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
10082 
10083 	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
10084 	if (!cmd) {
10085 		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
10086 
10087 		/* If this is an HCI command related to powering the
10088 		 * HCI dev on or off, don't send any mgmt signals.
10089 		 */
10090 		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
10091 			return;
10092 
10093 		if (pending_find(MGMT_OP_SET_POWERED, hdev))
10094 			return;
10095 	}
10096 
10097 	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
10098 			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
10099 	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
10100 }
10101 
10102 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
10103 {
10104 	int i;
10105 
10106 	for (i = 0; i < uuid_count; i++) {
10107 		if (!memcmp(uuid, uuids[i], 16))
10108 			return true;
10109 	}
10110 
10111 	return false;
10112 }
10113 
10114 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
10115 {
10116 	u16 parsed = 0;
10117 
10118 	while (parsed < eir_len) {
10119 		u8 field_len = eir[0];
10120 		u8 uuid[16];
10121 		int i;
10122 
10123 		if (field_len == 0)
10124 			break;
10125 
10126 		if (eir_len - parsed < field_len + 1)
10127 			break;
10128 
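		/* 16-bit and 32-bit UUIDs, carried little-endian in EIR,
		 * are promoted to 128 bits by patching them into bytes
		 * 12-15 of the Bluetooth base UUID before comparing.
		 */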
10129 		switch (eir[1]) {
10130 		case EIR_UUID16_ALL:
10131 		case EIR_UUID16_SOME:
10132 			for (i = 0; i + 3 <= field_len; i += 2) {
10133 				memcpy(uuid, bluetooth_base_uuid, 16);
10134 				uuid[13] = eir[i + 3];
10135 				uuid[12] = eir[i + 2];
10136 				if (has_uuid(uuid, uuid_count, uuids))
10137 					return true;
10138 			}
10139 			break;
10140 		case EIR_UUID32_ALL:
10141 		case EIR_UUID32_SOME:
10142 			for (i = 0; i + 5 <= field_len; i += 4) {
10143 				memcpy(uuid, bluetooth_base_uuid, 16);
10144 				uuid[15] = eir[i + 5];
10145 				uuid[14] = eir[i + 4];
10146 				uuid[13] = eir[i + 3];
10147 				uuid[12] = eir[i + 2];
10148 				if (has_uuid(uuid, uuid_count, uuids))
10149 					return true;
10150 			}
10151 			break;
10152 		case EIR_UUID128_ALL:
10153 		case EIR_UUID128_SOME:
10154 			for (i = 0; i + 17 <= field_len; i += 16) {
10155 				memcpy(uuid, eir + i + 2, 16);
10156 				if (has_uuid(uuid, uuid_count, uuids))
10157 					return true;
10158 			}
10159 			break;
10160 		}
10161 
10162 		parsed += field_len + 1;
10163 		eir += field_len + 1;
10164 	}
10165 
10166 	return false;
10167 }
10168 
10169 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
10170 			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
10171 {
10172 	/* If an RSSI threshold has been specified, and
10173 	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
10174 	 * an RSSI smaller than the RSSI threshold will be dropped. If the quirk
10175 	 * is set, let it through for further processing, as we might need to
10176 	 * restart the scan.
10177 	 *
10178 	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
10179 	 * the results are also dropped.
10180 	 */
10181 	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
10182 	    (rssi == HCI_RSSI_INVALID ||
10183 	    (rssi < hdev->discovery.rssi &&
10184 	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
10185 		return false;
10186 
10187 	if (hdev->discovery.uuid_count != 0) {
10188 		/* If a list of UUIDs is provided in filter, results with no
10189 		 * matching UUID should be dropped.
10190 		 */
10191 		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
10192 				   hdev->discovery.uuids) &&
10193 		    !eir_has_uuids(scan_rsp, scan_rsp_len,
10194 				   hdev->discovery.uuid_count,
10195 				   hdev->discovery.uuids))
10196 			return false;
10197 	}
10198 
10199 	/* If duplicate filtering does not report RSSI changes, then restart
10200 	 * scanning to ensure updated results with current RSSI values.
10201 	 */
10202 	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
10203 		/* Validate RSSI value against the RSSI threshold once more. */
10204 		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
10205 		    rssi < hdev->discovery.rssi)
10206 			return false;
10207 	}
10208 
10209 	return true;
10210 }
10211 
10212 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
10213 				  bdaddr_t *bdaddr, u8 addr_type)
10214 {
10215 	struct mgmt_ev_adv_monitor_device_lost ev;
10216 
10217 	ev.monitor_handle = cpu_to_le16(handle);
10218 	bacpy(&ev.addr.bdaddr, bdaddr);
10219 	ev.addr.type = addr_type;
10220 
10221 	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
10222 		   NULL);
10223 }
10224 
10225 static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
10226 					       struct sk_buff *skb,
10227 					       struct sock *skip_sk,
10228 					       u16 handle)
10229 {
10230 	struct sk_buff *advmon_skb;
10231 	size_t advmon_skb_len;
10232 	__le16 *monitor_handle;
10233 
10234 	if (!skb)
10235 		return;
10236 
10237 	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
10238 			  sizeof(struct mgmt_ev_device_found)) + skb->len;
10239 	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
10240 				    advmon_skb_len);
10241 	if (!advmon_skb)
10242 		return;
10243 
10244 	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
10245 	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
10246 	 * store monitor_handle of the matched monitor.
10247 	 */
10248 	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
10249 	*monitor_handle = cpu_to_le16(handle);
10250 	skb_put_data(advmon_skb, skb->data, skb->len);
10251 
10252 	mgmt_event_skb(advmon_skb, skip_sk);
10253 }
10254 
10255 static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
10256 					  bdaddr_t *bdaddr, bool report_device,
10257 					  struct sk_buff *skb,
10258 					  struct sock *skip_sk)
10259 {
10260 	struct monitored_device *dev, *tmp;
10261 	bool matched = false;
10262 	bool notified = false;
10263 
10264 	/* We have received the Advertisement Report because:
10265 	 * 1. the kernel has initiated active discovery
10266 	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
10267 	 *    passive scanning
10268 	 * 3. if none of the above is true, we have one or more active
10269 	 *    Advertisement Monitors
10270 	 *
10271 	 * For cases 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
10272 	 * and report ONLY one advertisement per device for the matched Monitor
10273 	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
10274 	 *
10275 	 * For case 3, since we are not actively scanning and all advertisements
10276 	 * received are due to a matched Advertisement Monitor, report all
10277 	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
10278 	 */
10279 	if (report_device && !hdev->advmon_pend_notify) {
10280 		mgmt_event_skb(skb, skip_sk);
10281 		return;
10282 	}
10283 
10284 	hdev->advmon_pend_notify = false;
10285 
10286 	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
10287 		if (!bacmp(&dev->bdaddr, bdaddr)) {
10288 			matched = true;
10289 
10290 			if (!dev->notified) {
10291 				mgmt_send_adv_monitor_device_found(hdev, skb,
10292 								   skip_sk,
10293 								   dev->handle);
10294 				notified = true;
10295 				dev->notified = true;
10296 			}
10297 		}
10298 
10299 		if (!dev->notified)
10300 			hdev->advmon_pend_notify = true;
10301 	}
10302 
10303 	if (!report_device &&
10304 	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
10305 		/* Handle 0 indicates that we are not active scanning and this
10306 		 * is a subsequent advertisement report for an already matched
10307 		 * Advertisement Monitor or the controller offloading support
10308 		 * is not available.
10309 		 */
10310 		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
10311 	}
10312 
10313 	if (report_device)
10314 		mgmt_event_skb(skb, skip_sk);
10315 	else
10316 		kfree_skb(skb);
10317 }
10318 
10319 static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
10320 			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
10321 			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
10322 			      u64 instant)
10323 {
10324 	struct sk_buff *skb;
10325 	struct mgmt_ev_mesh_device_found *ev;
10326 	int i, j;
10327 
10328 	if (!hdev->mesh_ad_types[0])
10329 		goto accepted;
10330 
10331 	/* Scan for requested AD types */
10332 	if (eir_len > 0) {
10333 		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
10334 			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
10335 				if (!hdev->mesh_ad_types[j])
10336 					break;
10337 
10338 				if (hdev->mesh_ad_types[j] == eir[i + 1])
10339 					goto accepted;
10340 			}
10341 		}
10342 	}
10343 
10344 	if (scan_rsp_len > 0) {
10345 		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
10346 			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
10347 				if (!hdev->mesh_ad_types[j])
10348 					break;
10349 
10350 				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
10351 					goto accepted;
10352 			}
10353 		}
10354 	}
10355 
10356 	return;
10357 
10358 accepted:
10359 	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
10360 			     sizeof(*ev) + eir_len + scan_rsp_len);
10361 	if (!skb)
10362 		return;
10363 
10364 	ev = skb_put(skb, sizeof(*ev));
10365 
10366 	bacpy(&ev->addr.bdaddr, bdaddr);
10367 	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
10368 	ev->rssi = rssi;
10369 	ev->flags = cpu_to_le32(flags);
10370 	ev->instant = cpu_to_le64(instant);
10371 
10372 	if (eir_len > 0)
10373 		/* Copy EIR or advertising data into event */
10374 		skb_put_data(skb, eir, eir_len);
10375 
10376 	if (scan_rsp_len > 0)
10377 		/* Append scan response data to event */
10378 		skb_put_data(skb, scan_rsp, scan_rsp_len);
10379 
10380 	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
10381 
10382 	mgmt_event_skb(skb, NULL);
10383 }
10384 
10385 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10386 		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
10387 		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
10388 		       u64 instant)
10389 {
10390 	struct sk_buff *skb;
10391 	struct mgmt_ev_device_found *ev;
10392 	bool report_device = hci_discovery_active(hdev);
10393 
10394 	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
10395 		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
10396 				  eir, eir_len, scan_rsp, scan_rsp_len,
10397 				  instant);
10398 
10399 	/* Don't send events for a non-kernel initiated discovery. With
10400 	 * LE, one exception is when we have pend_le_reports > 0, in which
10401 	 * case we're doing passive scanning and want these events.
10402 	 */
10403 	if (!hci_discovery_active(hdev)) {
10404 		if (link_type == ACL_LINK)
10405 			return;
10406 		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
10407 			report_device = true;
10408 		else if (!hci_is_adv_monitoring(hdev))
10409 			return;
10410 	}
10411 
10412 	if (hdev->discovery.result_filtering) {
10413 		/* We are using service discovery */
10414 		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
10415 				     scan_rsp_len))
10416 			return;
10417 	}
10418 
10419 	if (hdev->discovery.limited) {
10420 		/* Check for limited discoverable bit */
10421 		if (dev_class) {
10422 			if (!(dev_class[1] & 0x20))
10423 				return;
10424 		} else {
10425 			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
10426 			if (!flags || !(flags[0] & LE_AD_LIMITED))
10427 				return;
10428 		}
10429 	}
10430 
10431 	/* Allocate skb. The 5 extra bytes fit a potential CoD EIR field:
	 * one length byte, the EIR_CLASS_OF_DEV type byte and 3 class bytes.
	 */
10432 	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10433 			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
10434 	if (!skb)
10435 		return;
10436 
10437 	ev = skb_put(skb, sizeof(*ev));
10438 
10439 	/* In case of device discovery with BR/EDR devices (pre 1.2), the
10440 	 * RSSI value was reported as 0 when not available. This behavior
10441 	 * is kept when using device discovery. This is required for full
10442 	 * backwards compatibility with the API.
10443 	 *
10444 	 * However when using service discovery, the value 127 will be
10445 	 * returned when the RSSI is not available.
10446 	 */
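	/* HCI_RSSI_INVALID is 127, which is what service discovery
	 * consumers see when no RSSI is available.
	 */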
10447 	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
10448 	    link_type == ACL_LINK)
10449 		rssi = 0;
10450 
10451 	bacpy(&ev->addr.bdaddr, bdaddr);
10452 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
10453 	ev->rssi = rssi;
10454 	ev->flags = cpu_to_le32(flags);
10455 
10456 	if (eir_len > 0)
10457 		/* Copy EIR or advertising data into event */
10458 		skb_put_data(skb, eir, eir_len);
10459 
10460 	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
10461 		u8 eir_cod[5];
10462 
10463 		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
10464 					   dev_class, 3);
10465 		skb_put_data(skb, eir_cod, sizeof(eir_cod));
10466 	}
10467 
10468 	if (scan_rsp_len > 0)
10469 		/* Append scan response data to event */
10470 		skb_put_data(skb, scan_rsp, scan_rsp_len);
10471 
10472 	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
10473 
10474 	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
10475 }
10476 
10477 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10478 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
10479 {
10480 	struct sk_buff *skb;
10481 	struct mgmt_ev_device_found *ev;
10482 	u16 eir_len = 0;
10483 	u32 flags = 0;
10484 
10485 	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10486 			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
10487 
	/* Bail out if the event skb could not be allocated */
	if (!skb)
		return;

10488 	ev = skb_put(skb, sizeof(*ev));
10489 	bacpy(&ev->addr.bdaddr, bdaddr);
10490 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
10491 	ev->rssi = rssi;
10492 
10493 	if (name)
10494 		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
10495 	else
10496 		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
10497 
10498 	ev->eir_len = cpu_to_le16(eir_len);
10499 	ev->flags = cpu_to_le32(flags);
10500 
10501 	mgmt_event_skb(skb, NULL);
10502 }
10503 
10504 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
10505 {
10506 	struct mgmt_ev_discovering ev;
10507 
10508 	bt_dev_dbg(hdev, "discovering %u", discovering);
10509 
10510 	memset(&ev, 0, sizeof(ev));
10511 	ev.type = hdev->discovery.type;
10512 	ev.discovering = discovering;
10513 
10514 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
10515 }
10516 
10517 void mgmt_suspending(struct hci_dev *hdev, u8 state)
10518 {
10519 	struct mgmt_ev_controller_suspend ev;
10520 
10521 	ev.suspend_state = state;
10522 	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
10523 }
10524 
10525 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
10526 		   u8 addr_type)
10527 {
10528 	struct mgmt_ev_controller_resume ev;
10529 
10530 	ev.wake_reason = reason;
10531 	if (bdaddr) {
10532 		bacpy(&ev.addr.bdaddr, bdaddr);
10533 		ev.addr.type = addr_type;
10534 	} else {
10535 		memset(&ev.addr, 0, sizeof(ev.addr));
10536 	}
10537 
10538 	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
10539 }
10540 
10541 static struct hci_mgmt_chan chan = {
10542 	.channel	= HCI_CHANNEL_CONTROL,
10543 	.handler_count	= ARRAY_SIZE(mgmt_handlers),
10544 	.handlers	= mgmt_handlers,
10545 	.hdev_init	= mgmt_init_hdev,
10546 };
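
/* Registered from mgmt_init() below; every socket bound to
 * HCI_CHANNEL_CONTROL is then served by the mgmt_handlers table above.
 */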
10547 
10548 int mgmt_init(void)
10549 {
10550 	return hci_mgmt_chan_register(&chan);
10551 }
10552 
10553 void mgmt_exit(void)
10554 {
10555 	hci_mgmt_chan_unregister(&chan);
10556 }
10557 
10558 void mgmt_cleanup(struct sock *sk)
10559 {
10560 	struct mgmt_mesh_tx *mesh_tx;
10561 	struct hci_dev *hdev;
10562 
10563 	read_lock(&hci_dev_list_lock);
10564 
10565 	list_for_each_entry(hdev, &hci_dev_list, list) {
10566 		do {
10567 			mesh_tx = mgmt_mesh_next(hdev, sk);
10568 
10569 			if (mesh_tx)
10570 				mesh_send_complete(hdev, mesh_tx, true);
10571 		} while (mesh_tx);
10572 	}
10573 
10574 	read_unlock(&hci_dev_list_lock);
10575 }
10576