/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <linux/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "mgmt_util.h"
#include "mgmt_config.h"
#include "msft.h"
#include "eir.h"
#include "aosp.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	23

static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
	MGMT_OP_HCI_CMD_SYNC,
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};

static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};

static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};

#define CACHE_TIMEOUT	secs_to_jiffies(2)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* HCI to MGMT error code conversion table */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

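/* Map kernel errno values coming back from the hci_sync machinery onto
 * MGMT status codes. Any errno without an explicit mapping falls back
 * to MGMT_STATUS_FAILED.
 */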
static u8 mgmt_errno_status(int err)
{
	switch (err) {
	case 0:
		return MGMT_STATUS_SUCCESS;
	case -EPERM:
		return MGMT_STATUS_REJECTED;
	case -EINVAL:
		return MGMT_STATUS_INVALID_PARAMS;
	case -EOPNOTSUPP:
		return MGMT_STATUS_NOT_SUPPORTED;
	case -EBUSY:
		return MGMT_STATUS_BUSY;
	case -ETIMEDOUT:
		return MGMT_STATUS_AUTH_FAILED;
	case -ENOMEM:
		return MGMT_STATUS_NO_RESOURCES;
	case -EISCONN:
		return MGMT_STATUS_ALREADY_CONNECTED;
	case -ENOTCONN:
		return MGMT_STATUS_DISCONNECTED;
	}

	return MGMT_STATUS_FAILED;
}

static u8 mgmt_status(int err)
{
	if (err < 0)
		return mgmt_errno_status(err);

	if (err < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[err];

	return MGMT_STATUS_FAILED;
}

static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}

static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}

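/* Convert an MGMT address type (BDADDR_LE_*) into the LE address type
 * (ADDR_LE_DEV_*) used internally by the HCI core.
 */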
static u8 le_addr_type(u8 mgmt_addr_type)
{
	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
		return ADDR_LE_DEV_PUBLIC;
	else
		return ADDR_LE_DEV_RANDOM;
}

void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}

static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}

static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}

static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
			continue;

		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list)
		count++;

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			rp->entry[count].type = 0x01;
		else
			rp->entry[count].type = 0x00;

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}

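/* A controller counts as configured once any required external
 * configuration has completed and, where the quirks demand it, a valid
 * public address has been set.
 */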
static bool is_configured(struct hci_dev *hdev)
{
	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		return false;

	if ((hci_test_quirk(hdev, HCI_QUIRK_INVALID_BDADDR) ||
	     hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((hci_test_quirk(hdev, HCI_QUIRK_INVALID_BDADDR) ||
	     hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}

static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}

static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}

static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}

static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}

static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}

static u32 get_configurable_phys(struct hci_dev *hdev)
{
	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
}

static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev))
			settings |= MGMT_SETTING_SSP;

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (hci_test_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) || hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (ll_privacy_capable(hdev))
		settings |= MGMT_SETTING_LL_PRIVACY;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}

static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for the static address has two purposes:
	 * it indicates whether the static address will be used, and
	 * whether it has actually been set.
	 *
	 * This means that if the static address is not configured, this
	 * flag will never be set. If the address is configured, then
	 * whether it is actually in use decides if the flag is set.
	 *
	 * For LE-only controllers and dual-mode controllers with BR/EDR
	 * disabled, the existence of the static address is evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_enabled(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_enabled(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_enabled(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_enabled(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	if (ll_privacy_enabled(hdev))
		settings |= MGMT_SETTING_LL_PRIVACY;

	return settings;
}

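/* Look up a pending mgmt command for this controller on the control
 * channel by opcode.
 */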
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}

bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}

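/* Flush the service cache by bringing the EIR data and the class of
 * device back in sync with the current state.
 */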
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}

static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}

static int rpa_expired_sync(struct hci_dev *hdev, void *data)
{
	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
	else
		return hci_enable_advertising_sync(hdev);
}

static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data);

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);

static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}

static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	if (list_empty(&hdev->adv_instances))
		hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}

static int mesh_send_sync(struct hci_dev *hdev, void *data);
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}

static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}

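/* Perform one-time mgmt setup for a controller when the first mgmt
 * command arrives for it: initialize the delayed works used by mgmt
 * and mark the controller as mgmt controlled.
 */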
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them. For mgmt,
	 * however, we require user-space to enable it
	 * explicitly.
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}

static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}

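/* Append class of device, appearance and the complete/short local name
 * to an EIR buffer, depending on which transports are enabled. Returns
 * the number of bytes written.
 */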
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}

static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));

	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}

static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}

void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}

void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}

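/* Stop a pending advertising-instance timeout, if one is armed */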
static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for the AUTO_OFF case where the device might
		 * not "really" have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}

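/* Broadcast the current settings to all sockets subscribed to setting
 * events, optionally skipping the originating socket.
 */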
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}

static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_settings() for power on, as power off is
		 * deferred to hdev->power_off work which does call
		 * hci_dev_do_close().
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_free(cmd);
}

static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode cp;

	mutex_lock(&hdev->mgmt_pending_lock);

	/* Make sure cmd still outstanding. */
	if (!__mgmt_pending_listed(hdev, cmd)) {
		mutex_unlock(&hdev->mgmt_pending_lock);
		return -ECANCELED;
	}

	memcpy(&cp, cmd->param, sizeof(cp));

	mutex_unlock(&hdev->mgmt_pending_lock);

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp.val);
}

static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!cp->val) {
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
					      MGMT_STATUS_BUSY);
			goto failed;
		}
	}

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}

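/* Helper context passed to the pending-command iterators below: it
 * carries the status to report and remembers the first socket that was
 * responded to so it can be notified (and released) afterwards.
 */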
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};

static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, *status);
}

static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	/* dequeue cmd_sync entries using cmd as data as that is about to be
	 * removed/freed.
	 */
	hci_cmd_sync_dequeue(match->hdev, NULL, cmd, NULL);

	if (cmd->cmd_complete) {
		cmd->cmd_complete(cmd, match->mgmt_status);
		return;
	}

	cmd_status_rsp(cmd, data);
}

static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}

static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}

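/* Commands that operate on BR/EDR functionality are rejected when
 * BR/EDR is unsupported or currently disabled on the controller.
 */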
static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = secs_to_jiffies(hdev->discov_timeout);

		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
	hci_dev_unlock(hdev);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	if (!mgmt_pending_listed(hdev, data))
		return -ECANCELED;

	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}

static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets
	 * updated, no HCI transactions are needed.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = secs_to_jiffies(hdev->discov_timeout);

			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
					  int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
		goto done;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}

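/* Update the connectable setting while the controller is powered off:
 * only the flags are changed and the new settings are reported, with
 * no HCI traffic involved.
 */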
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}

static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	if (!mgmt_pending_listed(hdev, data))
		return -ECANCELED;

	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}

static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;
	u8 enable;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
		return;

	cp = cmd->param;
	enable = cp->val;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			new_settings(hdev, NULL);
		}

		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
		return;
	}

1954 	if (enable) {
1955 		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1956 	} else {
1957 		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1958 	}
1959 
1960 	settings_rsp(cmd, &match);
1961 
1962 	if (changed)
1963 		new_settings(hdev, match.sk);
1964 
1965 	if (match.sk)
1966 		sock_put(match.sk);
1967 
1968 	hci_update_eir_sync(hdev);
1969 }
1970 
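/* Runs from the hci_sync queue. The request parameters are snapshotted
 * under mgmt_pending_lock since the pending command may be removed
 * concurrently; the final HCI_SSP_ENABLED state is settled in
 * set_ssp_complete().
 */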
1971 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1972 {
1973 	struct mgmt_pending_cmd *cmd = data;
1974 	struct mgmt_mode cp;
1975 	bool changed = false;
1976 	int err;
1977 
1978 	mutex_lock(&hdev->mgmt_pending_lock);
1979 
1980 	if (!__mgmt_pending_listed(hdev, cmd)) {
1981 		mutex_unlock(&hdev->mgmt_pending_lock);
1982 		return -ECANCELED;
1983 	}
1984 
1985 	memcpy(&cp, cmd->param, sizeof(cp));
1986 
1987 	mutex_unlock(&hdev->mgmt_pending_lock);
1988 
1989 	if (cp.val)
1990 		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1991 
1992 	err = hci_write_ssp_mode_sync(hdev, cp.val);
1993 
1994 	if (!err && changed)
1995 		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
1996 
1997 	return err;
1998 }
1999 
2000 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2001 {
2002 	struct mgmt_mode *cp = data;
2003 	struct mgmt_pending_cmd *cmd;
2004 	u8 status;
2005 	int err;
2006 
2007 	bt_dev_dbg(hdev, "sock %p", sk);
2008 
2009 	status = mgmt_bredr_support(hdev);
2010 	if (status)
2011 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2012 
2013 	if (!lmp_ssp_capable(hdev))
2014 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2015 				       MGMT_STATUS_NOT_SUPPORTED);
2016 
2017 	if (cp->val != 0x00 && cp->val != 0x01)
2018 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2019 				       MGMT_STATUS_INVALID_PARAMS);
2020 
2021 	hci_dev_lock(hdev);
2022 
2023 	if (!hdev_is_powered(hdev)) {
2024 		bool changed;
2025 
2026 		if (cp->val) {
2027 			changed = !hci_dev_test_and_set_flag(hdev,
2028 							     HCI_SSP_ENABLED);
2029 		} else {
2030 			changed = hci_dev_test_and_clear_flag(hdev,
2031 							      HCI_SSP_ENABLED);
2032 		}
2033 
2034 		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2035 		if (err < 0)
2036 			goto failed;
2037 
2038 		if (changed)
2039 			err = new_settings(hdev, sk);
2040 
2041 		goto failed;
2042 	}
2043 
2044 	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2045 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2046 				      MGMT_STATUS_BUSY);
2047 		goto failed;
2048 	}
2049 
2050 	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2051 		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2052 		goto failed;
2053 	}
2054 
2055 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2056 	if (!cmd)
2057 		err = -ENOMEM;
2058 	else
2059 		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2060 					 set_ssp_complete);
2061 
2062 	if (err < 0) {
2063 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2064 				      MGMT_STATUS_FAILED);
2065 
2066 		if (cmd)
2067 			mgmt_pending_remove(cmd);
2068 	}
2069 
2070 failed:
2071 	hci_dev_unlock(hdev);
2072 	return err;
2073 }
2074 
2075 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2076 {
2077 	bt_dev_dbg(hdev, "sock %p", sk);
2078 
2079 	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2080 			       MGMT_STATUS_NOT_SUPPORTED);
2081 }
2082 
2083 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2084 {
2085 	struct mgmt_pending_cmd *cmd = data;
2086 	struct cmd_lookup match = { NULL, hdev };
2087 	u8 status = mgmt_status(err);
2088 
2089 	bt_dev_dbg(hdev, "err %d", err);
2090 
2091 	if (err == -ECANCELED || !mgmt_pending_valid(hdev, data))
2092 		return;
2093 
2094 	if (status) {
2095 		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, status);
2096 		goto done;
2097 	}
2098 
2099 	settings_rsp(cmd, &match);
2100 
2101 	new_settings(hdev, match.sk);
2102 
2103 	if (match.sk)
2104 		sock_put(match.sk);
2105 
2106 done:
2107 	mgmt_pending_free(cmd);
2108 }
2109 
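/* Runs from the hci_sync queue. Disabling LE tears down all advertising
 * instances before clearing LE host support; enabling sets
 * HCI_LE_ENABLED and then refreshes the default advertising and scan
 * response data.
 */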
2110 static int set_le_sync(struct hci_dev *hdev, void *data)
2111 {
2112 	struct mgmt_pending_cmd *cmd = data;
2113 	struct mgmt_mode cp;
2114 	u8 val;
2115 	int err;
2116 
2117 	mutex_lock(&hdev->mgmt_pending_lock);
2118 
2119 	if (!__mgmt_pending_listed(hdev, cmd)) {
2120 		mutex_unlock(&hdev->mgmt_pending_lock);
2121 		return -ECANCELED;
2122 	}
2123 
2124 	memcpy(&cp, cmd->param, sizeof(cp));
2125 	val = !!cp.val;
2126 
2127 	mutex_unlock(&hdev->mgmt_pending_lock);
2128 
2129 	if (!val) {
2130 		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2131 
2132 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2133 			hci_disable_advertising_sync(hdev);
2134 
2135 		if (ext_adv_capable(hdev))
2136 			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2137 	} else {
2138 		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2139 	}
2140 
2141 	err = hci_write_le_host_supported_sync(hdev, val, 0);
2142 
2143 	/* Make sure the controller has a good default for
2144 	 * advertising data. Restrict the update to when LE
2145 	 * has actually been enabled. During power on, the
2146 	 * update in powered_update_hci will take care of it.
2147 	 */
2148 	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2149 		if (ext_adv_capable(hdev)) {
2150 			int status;
2151 
2152 			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2153 			if (!status)
2154 				hci_update_scan_rsp_data_sync(hdev, 0x00);
2155 		} else {
2156 			hci_update_adv_data_sync(hdev, 0x00);
2157 			hci_update_scan_rsp_data_sync(hdev, 0x00);
2158 		}
2159 
2160 		hci_update_passive_scan(hdev);
2161 	}
2162 
2163 	return err;
2164 }
2165 
2166 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2167 {
2168 	struct mgmt_pending_cmd *cmd = data;
2169 	u8 status = mgmt_status(err);
2170 	struct sock *sk;
2171 
2172 	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
2173 		return;
2174 
2175 	sk = cmd->sk;
2176 
2177 	if (status) {
2178 		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev, true,
2179 				     cmd_status_rsp, &status);
2180 		return;
2181 	}
2182 
2183 	mgmt_pending_remove(cmd);
2184 	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
2185 }
2186 
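/* Runs from the hci_sync queue: apply the Mesh Receiver settings
 * (HCI_MESH flag, scan interval/window and the AD type filter) and
 * re-program passive scanning accordingly.
 */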
2187 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2188 {
2189 	struct mgmt_pending_cmd *cmd = data;
2190 	struct mgmt_cp_set_mesh cp;
2191 	size_t len;
2192 
2193 	mutex_lock(&hdev->mgmt_pending_lock);
2194 
2195 	if (!__mgmt_pending_listed(hdev, cmd)) {
2196 		mutex_unlock(&hdev->mgmt_pending_lock);
2197 		return -ECANCELED;
2198 	}
2199 
2200 	memcpy(&cp, cmd->param, sizeof(cp));
2201 
2202 	mutex_unlock(&hdev->mgmt_pending_lock);
2203 
2204 	len = cmd->param_len;
2205 
2206 	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2207 
2208 	if (cp.enable)
2209 		hci_dev_set_flag(hdev, HCI_MESH);
2210 	else
2211 		hci_dev_clear_flag(hdev, HCI_MESH);
2212 
2213 	hdev->le_scan_interval = __le16_to_cpu(cp.period);
2214 	hdev->le_scan_window = __le16_to_cpu(cp.window);
2215 
2216 	len -= sizeof(cp);
2217 
2218 	/* If filters don't fit, forward all adv pkts */
2219 	if (len <= sizeof(hdev->mesh_ad_types))
2220 		memcpy(hdev->mesh_ad_types, cp.ad_types, len);
2221 
2222 	hci_update_passive_scan_sync(hdev);
2223 	return 0;
2224 }
2225 
2226 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2227 {
2228 	struct mgmt_cp_set_mesh *cp = data;
2229 	struct mgmt_pending_cmd *cmd;
2230 	__u16 period, window;
2231 	int err = 0;
2232 
2233 	bt_dev_dbg(hdev, "sock %p", sk);
2234 
2235 	if (!lmp_le_capable(hdev) ||
2236 	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2237 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2238 				       MGMT_STATUS_NOT_SUPPORTED);
2239 
2240 	if (cp->enable != 0x00 && cp->enable != 0x01)
2241 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2242 				       MGMT_STATUS_INVALID_PARAMS);
2243 
2244 	/* Keep allowed ranges in sync with set_scan_params() */
2245 	period = __le16_to_cpu(cp->period);
2246 
2247 	if (period < 0x0004 || period > 0x4000)
2248 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2249 				       MGMT_STATUS_INVALID_PARAMS);
2250 
2251 	window = __le16_to_cpu(cp->window);
2252 
2253 	if (window < 0x0004 || window > 0x4000)
2254 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2255 				       MGMT_STATUS_INVALID_PARAMS);
2256 
2257 	if (window > period)
2258 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2259 				       MGMT_STATUS_INVALID_PARAMS);
2260 
2261 	hci_dev_lock(hdev);
2262 
2263 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2264 	if (!cmd)
2265 		err = -ENOMEM;
2266 	else
2267 		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2268 					 set_mesh_complete);
2269 
2270 	if (err < 0) {
2271 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2272 				      MGMT_STATUS_FAILED);
2273 
2274 		if (cmd)
2275 			mgmt_pending_remove(cmd);
2276 	}
2277 
2278 	hci_dev_unlock(hdev);
2279 	return err;
2280 }
2281 
2282 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2283 {
2284 	struct mgmt_mesh_tx *mesh_tx = data;
2285 	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2286 	unsigned long mesh_send_interval;
2287 	u8 mgmt_err = mgmt_status(err);
2288 
2289 	/* Report any errors here, but don't report completion */
2290 
2291 	if (mgmt_err) {
2292 		hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2293 		/* Send Complete Error Code for handle */
2294 		mesh_send_complete(hdev, mesh_tx, false);
2295 		return;
2296 	}
2297 
2298 	mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2299 	queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2300 			   mesh_send_interval);
2301 }
2302 
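/* Transmit a queued mesh packet by adding it as a dedicated advertising
 * instance, one past the controller's advertising sets, and scheduling
 * it relative to whatever instance is currently being advertised.
 */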
2303 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2304 {
2305 	struct mgmt_mesh_tx *mesh_tx = data;
2306 	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2307 	struct adv_info *adv, *next_instance;
2308 	u8 instance = hdev->le_num_of_adv_sets + 1;
2309 	u16 timeout, duration;
2310 	int err = 0;
2311 
2312 	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2313 		return MGMT_STATUS_BUSY;
2314 
2315 	timeout = 1000;
2316 	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2317 	adv = hci_add_adv_instance(hdev, instance, 0,
2318 				   send->adv_data_len, send->adv_data,
2319 				   0, NULL,
2320 				   timeout, duration,
2321 				   HCI_ADV_TX_POWER_NO_PREFERENCE,
2322 				   hdev->le_adv_min_interval,
2323 				   hdev->le_adv_max_interval,
2324 				   mesh_tx->handle);
2325 
2326 	if (!IS_ERR(adv))
2327 		mesh_tx->instance = instance;
2328 	else
2329 		err = PTR_ERR(adv);
2330 
2331 	if (hdev->cur_adv_instance == instance) {
2332 		/* If the currently advertised instance is being changed then
2333 		 * cancel the current advertising and schedule the next
2334 		 * instance. If there is only one instance then the overridden
2335 		 * advertising data will be visible right away.
2336 		 */
2337 		cancel_adv_timeout(hdev);
2338 
2339 		next_instance = hci_get_next_instance(hdev, instance);
2340 		if (next_instance)
2341 			instance = next_instance->instance;
2342 		else
2343 			instance = 0;
2344 	} else if (hdev->adv_instance_timeout) {
2345 		/* Immediately advertise the new instance if no other is active,
2346 		 * or let it go naturally from the queue if already advertising.
2347 		 */
2348 		instance = 0;
2349 	}
2350 
2351 	if (instance)
2352 		return hci_schedule_adv_instance_sync(hdev, instance, true);
2353 
2354 	return err;
2355 }
2356 
2357 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2358 {
2359 	struct mgmt_rp_mesh_read_features *rp = data;
2360 
2361 	if (rp->used_handles >= rp->max_handles)
2362 		return;
2363 
2364 	rp->handles[rp->used_handles++] = mesh_tx->handle;
2365 }
2366 
2367 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2368 			 void *data, u16 len)
2369 {
2370 	struct mgmt_rp_mesh_read_features rp;
2371 
2372 	if (!lmp_le_capable(hdev) ||
2373 	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2374 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2375 				       MGMT_STATUS_NOT_SUPPORTED);
2376 
2377 	memset(&rp, 0, sizeof(rp));
2378 	rp.index = cpu_to_le16(hdev->id);
2379 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2380 		rp.max_handles = MESH_HANDLES_MAX;
2381 
2382 	hci_dev_lock(hdev);
2383 
2384 	if (rp.max_handles)
2385 		mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2386 
2387 	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2388 			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2389 
2390 	hci_dev_unlock(hdev);
2391 	return 0;
2392 }
2393 
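/* Runs from the hci_sync queue: a zero handle cancels every pending
 * mesh transmission owned by the requesting socket, while a non-zero
 * handle cancels only that transmission (if owned by the requester).
 */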
2394 static int send_cancel(struct hci_dev *hdev, void *data)
2395 {
2396 	struct mgmt_pending_cmd *cmd = data;
2397 	struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2398 	struct mgmt_mesh_tx *mesh_tx;
2399 
2400 	if (!cancel->handle) {
2401 		do {
2402 			mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2403 
2404 			if (mesh_tx)
2405 				mesh_send_complete(hdev, mesh_tx, false);
2406 		} while (mesh_tx);
2407 	} else {
2408 		mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2409 
2410 		if (mesh_tx && mesh_tx->sk == cmd->sk)
2411 			mesh_send_complete(hdev, mesh_tx, false);
2412 	}
2413 
2414 	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2415 			  0, NULL, 0);
2416 	mgmt_pending_free(cmd);
2417 
2418 	return 0;
2419 }
2420 
2421 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2422 			    void *data, u16 len)
2423 {
2424 	struct mgmt_pending_cmd *cmd;
2425 	int err;
2426 
2427 	if (!lmp_le_capable(hdev) ||
2428 	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2429 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2430 				       MGMT_STATUS_NOT_SUPPORTED);
2431 
2432 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2433 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2434 				       MGMT_STATUS_REJECTED);
2435 
2436 	hci_dev_lock(hdev);
2437 	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2438 	if (!cmd)
2439 		err = -ENOMEM;
2440 	else
2441 		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2442 
2443 	if (err < 0) {
2444 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2445 				      MGMT_STATUS_FAILED);
2446 
2447 		if (cmd)
2448 			mgmt_pending_free(cmd);
2449 	}
2450 
2451 	hci_dev_unlock(hdev);
2452 	return err;
2453 }
2454 
2455 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2456 {
2457 	struct mgmt_mesh_tx *mesh_tx;
2458 	struct mgmt_cp_mesh_send *send = data;
2459 	struct mgmt_rp_mesh_read_features rp;
2460 	bool sending;
2461 	int err = 0;
2462 
2463 	if (!lmp_le_capable(hdev) ||
2464 	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2465 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2466 				       MGMT_STATUS_NOT_SUPPORTED);
2467 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2468 	    len <= MGMT_MESH_SEND_SIZE ||
2469 	    len > (MGMT_MESH_SEND_SIZE + 31))
2470 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2471 				       MGMT_STATUS_REJECTED);
2472 
2473 	hci_dev_lock(hdev);
2474 
2475 	memset(&rp, 0, sizeof(rp));
2476 	rp.max_handles = MESH_HANDLES_MAX;
2477 
2478 	mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2479 
2480 	if (rp.max_handles <= rp.used_handles) {
2481 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2482 				      MGMT_STATUS_BUSY);
2483 		goto done;
2484 	}
2485 
2486 	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2487 	mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2488 
2489 	if (!mesh_tx)
2490 		err = -ENOMEM;
2491 	else if (!sending)
2492 		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2493 					 mesh_send_start_complete);
2494 
2495 	if (err < 0) {
2496 		bt_dev_err(hdev, "Send Mesh Failed %d", err);
2497 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2498 				      MGMT_STATUS_FAILED);
2499 
2500 		if (mesh_tx) {
2501 			if (sending)
2502 				mgmt_mesh_remove(mesh_tx);
2503 		}
2504 	} else {
2505 		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2506 
2507 		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2508 				  &mesh_tx->handle, 1);
2509 	}
2510 
2511 done:
2512 	hci_dev_unlock(hdev);
2513 	return err;
2514 }
2515 
2516 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2517 {
2518 	struct mgmt_mode *cp = data;
2519 	struct mgmt_pending_cmd *cmd;
2520 	int err;
2521 	u8 val, enabled;
2522 
2523 	bt_dev_dbg(hdev, "sock %p", sk);
2524 
2525 	if (!lmp_le_capable(hdev))
2526 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2527 				       MGMT_STATUS_NOT_SUPPORTED);
2528 
2529 	if (cp->val != 0x00 && cp->val != 0x01)
2530 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2531 				       MGMT_STATUS_INVALID_PARAMS);
2532 
2533 	/* Bluetooth single-mode LE-only controllers, or dual-mode
2534 	 * controllers configured as LE-only devices, do not allow
2535 	 * switching LE off. These have either LE enabled explicitly
2536 	 * or BR/EDR previously switched off.
2537 	 *
2538 	 * When trying to enable an already enabled LE, gracefully
2539 	 * send a positive response. Trying to disable it, however,
2540 	 * results in rejection.
2541 	 */
2542 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2543 		if (cp->val == 0x01)
2544 			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2545 
2546 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2547 				       MGMT_STATUS_REJECTED);
2548 	}
2549 
2550 	hci_dev_lock(hdev);
2551 
2552 	val = !!cp->val;
2553 	enabled = lmp_host_le_capable(hdev);
2554 
2555 	if (!hdev_is_powered(hdev) || val == enabled) {
2556 		bool changed = false;
2557 
2558 		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2559 			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2560 			changed = true;
2561 		}
2562 
2563 		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2564 			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2565 			changed = true;
2566 		}
2567 
2568 		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2569 		if (err < 0)
2570 			goto unlock;
2571 
2572 		if (changed)
2573 			err = new_settings(hdev, sk);
2574 
2575 		goto unlock;
2576 	}
2577 
2578 	if (pending_find(MGMT_OP_SET_LE, hdev) ||
2579 	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2580 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2581 				      MGMT_STATUS_BUSY);
2582 		goto unlock;
2583 	}
2584 
2585 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2586 	if (!cmd)
2587 		err = -ENOMEM;
2588 	else
2589 		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2590 					 set_le_complete);
2591 
2592 	if (err < 0) {
2593 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2594 				      MGMT_STATUS_FAILED);
2595 
2596 		if (cmd)
2597 			mgmt_pending_remove(cmd);
2598 	}
2599 
2600 unlock:
2601 	hci_dev_unlock(hdev);
2602 	return err;
2603 }
2604 
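/* Runs from the hci_sync queue: issue the user-supplied HCI command,
 * wait for the requested event (or command complete) and forward the
 * returned parameters on the mgmt socket.
 */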
2605 static int send_hci_cmd_sync(struct hci_dev *hdev, void *data)
2606 {
2607 	struct mgmt_pending_cmd *cmd = data;
2608 	struct mgmt_cp_hci_cmd_sync *cp = cmd->param;
2609 	struct sk_buff *skb;
2610 
2611 	skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cp->opcode),
2612 				le16_to_cpu(cp->params_len), cp->params,
2613 				cp->event, cp->timeout ?
2614 				secs_to_jiffies(cp->timeout) :
2615 				HCI_CMD_TIMEOUT);
2616 	if (IS_ERR(skb)) {
2617 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2618 				mgmt_status(PTR_ERR(skb)));
2619 		goto done;
2620 	}
2621 
2622 	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC, 0,
2623 			  skb->data, skb->len);
2624 
2625 	kfree_skb(skb);
2626 
2627 done:
2628 	mgmt_pending_free(cmd);
2629 
2630 	return 0;
2631 }
2632 
2633 static int mgmt_hci_cmd_sync(struct sock *sk, struct hci_dev *hdev,
2634 			     void *data, u16 len)
2635 {
2636 	struct mgmt_cp_hci_cmd_sync *cp = data;
2637 	struct mgmt_pending_cmd *cmd;
2638 	int err;
2639 
2640 	if (len != (offsetof(struct mgmt_cp_hci_cmd_sync, params) +
2641 		    le16_to_cpu(cp->params_len)))
2642 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2643 				       MGMT_STATUS_INVALID_PARAMS);
2644 
2645 	hci_dev_lock(hdev);
2646 	cmd = mgmt_pending_new(sk, MGMT_OP_HCI_CMD_SYNC, hdev, data, len);
2647 	if (!cmd)
2648 		err = -ENOMEM;
2649 	else
2650 		err = hci_cmd_sync_queue(hdev, send_hci_cmd_sync, cmd, NULL);
2651 
2652 	if (err < 0) {
2653 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2654 				      MGMT_STATUS_FAILED);
2655 
2656 		if (cmd)
2657 			mgmt_pending_free(cmd);
2658 	}
2659 
2660 	hci_dev_unlock(hdev);
2661 	return err;
2662 }
2663 
2664 /* This is a helper function to test for pending mgmt commands that can
2665  * cause CoD or EIR HCI commands to be sent. Only one such pending mgmt
2666  * command is allowed at a time since otherwise we cannot easily track
2667  * what the current and future values are, nor calculate whether a new
2668  * HCI command needs to be sent and, if so, with what value.
2669  */
2670 static bool pending_eir_or_class(struct hci_dev *hdev)
2671 {
2672 	struct mgmt_pending_cmd *cmd;
2673 
2674 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2675 		switch (cmd->opcode) {
2676 		case MGMT_OP_ADD_UUID:
2677 		case MGMT_OP_REMOVE_UUID:
2678 		case MGMT_OP_SET_DEV_CLASS:
2679 		case MGMT_OP_SET_POWERED:
2680 			return true;
2681 		}
2682 	}
2683 
2684 	return false;
2685 }
2686 
2687 static const u8 bluetooth_base_uuid[] = {
2688 			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2689 			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2690 };
2691 
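/* Determine the shortest representation of a 128-bit UUID: UUIDs built
 * on the Bluetooth Base UUID differ only in the 32 bits at octet 12 and
 * can therefore be expressed as 16-bit (values up to 0xffff) or 32-bit
 * UUIDs.
 */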
2692 static u8 get_uuid_size(const u8 *uuid)
2693 {
2694 	u32 val;
2695 
2696 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2697 		return 128;
2698 
2699 	val = get_unaligned_le32(&uuid[12]);
2700 	if (val > 0xffff)
2701 		return 32;
2702 
2703 	return 16;
2704 }
2705 
2706 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2707 {
2708 	struct mgmt_pending_cmd *cmd = data;
2709 
2710 	bt_dev_dbg(hdev, "err %d", err);
2711 
2712 	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
2713 			  mgmt_status(err), hdev->dev_class, 3);
2714 
2715 	mgmt_pending_free(cmd);
2716 }
2717 
2718 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2719 {
2720 	int err;
2721 
2722 	err = hci_update_class_sync(hdev);
2723 	if (err)
2724 		return err;
2725 
2726 	return hci_update_eir_sync(hdev);
2727 }
2728 
2729 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2730 {
2731 	struct mgmt_cp_add_uuid *cp = data;
2732 	struct mgmt_pending_cmd *cmd;
2733 	struct bt_uuid *uuid;
2734 	int err;
2735 
2736 	bt_dev_dbg(hdev, "sock %p", sk);
2737 
2738 	hci_dev_lock(hdev);
2739 
2740 	if (pending_eir_or_class(hdev)) {
2741 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2742 				      MGMT_STATUS_BUSY);
2743 		goto failed;
2744 	}
2745 
2746 	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2747 	if (!uuid) {
2748 		err = -ENOMEM;
2749 		goto failed;
2750 	}
2751 
2752 	memcpy(uuid->uuid, cp->uuid, 16);
2753 	uuid->svc_hint = cp->svc_hint;
2754 	uuid->size = get_uuid_size(cp->uuid);
2755 
2756 	list_add_tail(&uuid->list, &hdev->uuids);
2757 
2758 	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2759 	if (!cmd) {
2760 		err = -ENOMEM;
2761 		goto failed;
2762 	}
2763 
2764 	/* MGMT_OP_ADD_UUID doesn't require the adapter to be up/running, so
2765 	 * use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2766 	 */
2767 	err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
2768 				  mgmt_class_complete);
2769 	if (err < 0) {
2770 		mgmt_pending_free(cmd);
2771 		goto failed;
2772 	}
2773 
2774 failed:
2775 	hci_dev_unlock(hdev);
2776 	return err;
2777 }
2778 
2779 static bool enable_service_cache(struct hci_dev *hdev)
2780 {
2781 	if (!hdev_is_powered(hdev))
2782 		return false;
2783 
2784 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2785 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2786 				   CACHE_TIMEOUT);
2787 		return true;
2788 	}
2789 
2790 	return false;
2791 }
2792 
2793 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2794 {
2795 	int err;
2796 
2797 	err = hci_update_class_sync(hdev);
2798 	if (err)
2799 		return err;
2800 
2801 	return hci_update_eir_sync(hdev);
2802 }
2803 
2804 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2805 		       u16 len)
2806 {
2807 	struct mgmt_cp_remove_uuid *cp = data;
2808 	struct mgmt_pending_cmd *cmd;
2809 	struct bt_uuid *match, *tmp;
2810 	static const u8 bt_uuid_any[] = {
2811 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2812 	};
2813 	int err, found;
2814 
2815 	bt_dev_dbg(hdev, "sock %p", sk);
2816 
2817 	hci_dev_lock(hdev);
2818 
2819 	if (pending_eir_or_class(hdev)) {
2820 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2821 				      MGMT_STATUS_BUSY);
2822 		goto unlock;
2823 	}
2824 
2825 	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2826 		hci_uuids_clear(hdev);
2827 
2828 		if (enable_service_cache(hdev)) {
2829 			err = mgmt_cmd_complete(sk, hdev->id,
2830 						MGMT_OP_REMOVE_UUID,
2831 						0, hdev->dev_class, 3);
2832 			goto unlock;
2833 		}
2834 
2835 		goto update_class;
2836 	}
2837 
2838 	found = 0;
2839 
2840 	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2841 		if (memcmp(match->uuid, cp->uuid, 16) != 0)
2842 			continue;
2843 
2844 		list_del(&match->list);
2845 		kfree(match);
2846 		found++;
2847 	}
2848 
2849 	if (found == 0) {
2850 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2851 				      MGMT_STATUS_INVALID_PARAMS);
2852 		goto unlock;
2853 	}
2854 
2855 update_class:
2856 	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2857 	if (!cmd) {
2858 		err = -ENOMEM;
2859 		goto unlock;
2860 	}
2861 
2862 	/* MGMT_OP_REMOVE_UUID doesn't require the adapter to be up/running,
2863 	 * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2864 	 */
2865 	err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
2866 				  mgmt_class_complete);
2867 	if (err < 0)
2868 		mgmt_pending_free(cmd);
2869 
2870 unlock:
2871 	hci_dev_unlock(hdev);
2872 	return err;
2873 }
2874 
2875 static int set_class_sync(struct hci_dev *hdev, void *data)
2876 {
2877 	int err = 0;
2878 
2879 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2880 		cancel_delayed_work_sync(&hdev->service_cache);
2881 		err = hci_update_eir_sync(hdev);
2882 	}
2883 
2884 	if (err)
2885 		return err;
2886 
2887 	return hci_update_class_sync(hdev);
2888 }
2889 
2890 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2891 			 u16 len)
2892 {
2893 	struct mgmt_cp_set_dev_class *cp = data;
2894 	struct mgmt_pending_cmd *cmd;
2895 	int err;
2896 
2897 	bt_dev_dbg(hdev, "sock %p", sk);
2898 
2899 	if (!lmp_bredr_capable(hdev))
2900 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2901 				       MGMT_STATUS_NOT_SUPPORTED);
2902 
2903 	hci_dev_lock(hdev);
2904 
2905 	if (pending_eir_or_class(hdev)) {
2906 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2907 				      MGMT_STATUS_BUSY);
2908 		goto unlock;
2909 	}
2910 
2911 	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2912 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2913 				      MGMT_STATUS_INVALID_PARAMS);
2914 		goto unlock;
2915 	}
2916 
2917 	hdev->major_class = cp->major;
2918 	hdev->minor_class = cp->minor;
2919 
2920 	if (!hdev_is_powered(hdev)) {
2921 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2922 					hdev->dev_class, 3);
2923 		goto unlock;
2924 	}
2925 
2926 	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2927 	if (!cmd) {
2928 		err = -ENOMEM;
2929 		goto unlock;
2930 	}
2931 
2932 	/* MGMT_OP_SET_DEV_CLASS doesn't require the adapter to be up/running,
2933 	 * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2934 	 */
2935 	err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
2936 				  mgmt_class_complete);
2937 	if (err < 0)
2938 		mgmt_pending_free(cmd);
2939 
2940 unlock:
2941 	hci_dev_unlock(hdev);
2942 	return err;
2943 }
2944 
2945 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2946 			  u16 len)
2947 {
2948 	struct mgmt_cp_load_link_keys *cp = data;
2949 	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2950 				   sizeof(struct mgmt_link_key_info));
2951 	u16 key_count, expected_len;
2952 	bool changed;
2953 	int i;
2954 
2955 	bt_dev_dbg(hdev, "sock %p", sk);
2956 
2957 	if (!lmp_bredr_capable(hdev))
2958 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2959 				       MGMT_STATUS_NOT_SUPPORTED);
2960 
2961 	key_count = __le16_to_cpu(cp->key_count);
2962 	if (key_count > max_key_count) {
2963 		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2964 			   key_count);
2965 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2966 				       MGMT_STATUS_INVALID_PARAMS);
2967 	}
2968 
2969 	expected_len = struct_size(cp, keys, key_count);
2970 	if (expected_len != len) {
2971 		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2972 			   expected_len, len);
2973 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2974 				       MGMT_STATUS_INVALID_PARAMS);
2975 	}
2976 
2977 	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2978 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2979 				       MGMT_STATUS_INVALID_PARAMS);
2980 
2981 	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2982 		   key_count);
2983 
2984 	hci_dev_lock(hdev);
2985 
2986 	hci_link_keys_clear(hdev);
2987 
2988 	if (cp->debug_keys)
2989 		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2990 	else
2991 		changed = hci_dev_test_and_clear_flag(hdev,
2992 						      HCI_KEEP_DEBUG_KEYS);
2993 
2994 	if (changed)
2995 		new_settings(hdev, NULL);
2996 
2997 	for (i = 0; i < key_count; i++) {
2998 		struct mgmt_link_key_info *key = &cp->keys[i];
2999 
3000 		if (hci_is_blocked_key(hdev,
3001 				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
3002 				       key->val)) {
3003 			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
3004 				    &key->addr.bdaddr);
3005 			continue;
3006 		}
3007 
3008 		if (key->addr.type != BDADDR_BREDR) {
3009 			bt_dev_warn(hdev,
3010 				    "Invalid link address type %u for %pMR",
3011 				    key->addr.type, &key->addr.bdaddr);
3012 			continue;
3013 		}
3014 
3015 		if (key->type > 0x08) {
3016 			bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
3017 				    key->type, &key->addr.bdaddr);
3018 			continue;
3019 		}
3020 
3021 		/* Always ignore debug keys and require a new pairing if
3022 		 * the user wants to use them.
3023 		 */
3024 		if (key->type == HCI_LK_DEBUG_COMBINATION)
3025 			continue;
3026 
3027 		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
3028 				 key->type, key->pin_len, NULL);
3029 	}
3030 
3031 	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
3032 
3033 	hci_dev_unlock(hdev);
3034 
3035 	return 0;
3036 }
3037 
3038 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
3039 			   u8 addr_type, struct sock *skip_sk)
3040 {
3041 	struct mgmt_ev_device_unpaired ev;
3042 
3043 	bacpy(&ev.addr.bdaddr, bdaddr);
3044 	ev.addr.type = addr_type;
3045 
3046 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
3047 			  skip_sk);
3048 }
3049 
3050 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
3051 {
3052 	struct mgmt_pending_cmd *cmd = data;
3053 	struct mgmt_cp_unpair_device *cp = cmd->param;
3054 
3055 	if (!err)
3056 		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
3057 
3058 	cmd->cmd_complete(cmd, err);
3059 	mgmt_pending_free(cmd);
3060 }
3061 
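/* Runs from the hci_sync queue: if the device being unpaired still has
 * a baseband link, abort it; the keys have already been removed by
 * unpair_device().
 */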
3062 static int unpair_device_sync(struct hci_dev *hdev, void *data)
3063 {
3064 	struct mgmt_pending_cmd *cmd = data;
3065 	struct mgmt_cp_unpair_device *cp = cmd->param;
3066 	struct hci_conn *conn;
3067 
3068 	if (cp->addr.type == BDADDR_BREDR)
3069 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3070 					       &cp->addr.bdaddr);
3071 	else
3072 		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3073 					       le_addr_type(cp->addr.type));
3074 
3075 	if (!conn)
3076 		return 0;
3077 
3078 	/* Disregard any possible error since the likes of hci_abort_conn_sync
3079 	 * will clean up the connection no matter the error.
3080 	 */
3081 	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3082 
3083 	return 0;
3084 }
3085 
3086 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3087 			 u16 len)
3088 {
3089 	struct mgmt_cp_unpair_device *cp = data;
3090 	struct mgmt_rp_unpair_device rp;
3091 	struct hci_conn_params *params;
3092 	struct mgmt_pending_cmd *cmd;
3093 	struct hci_conn *conn;
3094 	u8 addr_type;
3095 	int err;
3096 
3097 	memset(&rp, 0, sizeof(rp));
3098 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3099 	rp.addr.type = cp->addr.type;
3100 
3101 	if (!bdaddr_type_is_valid(cp->addr.type))
3102 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3103 					 MGMT_STATUS_INVALID_PARAMS,
3104 					 &rp, sizeof(rp));
3105 
3106 	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
3107 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3108 					 MGMT_STATUS_INVALID_PARAMS,
3109 					 &rp, sizeof(rp));
3110 
3111 	hci_dev_lock(hdev);
3112 
3113 	if (!hdev_is_powered(hdev)) {
3114 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3115 					MGMT_STATUS_NOT_POWERED, &rp,
3116 					sizeof(rp));
3117 		goto unlock;
3118 	}
3119 
3120 	if (cp->addr.type == BDADDR_BREDR) {
3121 		/* If disconnection is requested, then look up the
3122 		 * connection. If the remote device is connected, the
3123 		 * connection will later be used to terminate the link.
3124 		 *
3125 		 * Explicitly setting it to NULL means that no
3126 		 * termination of the link will take place.
3127 		 */
3128 		if (cp->disconnect)
3129 			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3130 						       &cp->addr.bdaddr);
3131 		else
3132 			conn = NULL;
3133 
3134 		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
3135 		if (err < 0) {
3136 			err = mgmt_cmd_complete(sk, hdev->id,
3137 						MGMT_OP_UNPAIR_DEVICE,
3138 						MGMT_STATUS_NOT_PAIRED, &rp,
3139 						sizeof(rp));
3140 			goto unlock;
3141 		}
3142 
3143 		goto done;
3144 	}
3145 
3146 	/* LE address type */
3147 	addr_type = le_addr_type(cp->addr.type);
3148 
3149 	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
3150 	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3151 	if (err < 0) {
3152 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3153 					MGMT_STATUS_NOT_PAIRED, &rp,
3154 					sizeof(rp));
3155 		goto unlock;
3156 	}
3157 
3158 	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3159 	if (!conn) {
3160 		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3161 		goto done;
3162 	}
3163 
3165 	/* Defer clearing up the connection parameters until closing to
3166 	 * give a chance of keeping them if a re-pairing happens.
3167 	 */
3168 	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3169 
3170 	/* Disable auto-connection parameters if present */
3171 	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3172 	if (params) {
3173 		if (params->explicit_connect)
3174 			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3175 		else
3176 			params->auto_connect = HCI_AUTO_CONN_DISABLED;
3177 	}
3178 
3179 	/* If disconnection is not requested, then clear the connection
3180 	 * variable so that the link is not terminated.
3181 	 */
3182 	if (!cp->disconnect)
3183 		conn = NULL;
3184 
3185 done:
3186 	/* If the connection variable is set, then termination of the
3187 	 * link is requested.
3188 	 */
3189 	if (!conn) {
3190 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3191 					&rp, sizeof(rp));
3192 		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3193 		goto unlock;
3194 	}
3195 
3196 	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3197 			       sizeof(*cp));
3198 	if (!cmd) {
3199 		err = -ENOMEM;
3200 		goto unlock;
3201 	}
3202 
3203 	cmd->cmd_complete = addr_cmd_complete;
3204 
3205 	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3206 				 unpair_device_complete);
3207 	if (err < 0)
3208 		mgmt_pending_free(cmd);
3209 
3210 unlock:
3211 	hci_dev_unlock(hdev);
3212 	return err;
3213 }
3214 
3215 static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
3216 {
3217 	struct mgmt_pending_cmd *cmd = data;
3218 
3219 	cmd->cmd_complete(cmd, mgmt_status(err));
3220 	mgmt_pending_free(cmd);
3221 }
3222 
3223 static int disconnect_sync(struct hci_dev *hdev, void *data)
3224 {
3225 	struct mgmt_pending_cmd *cmd = data;
3226 	struct mgmt_cp_disconnect *cp = cmd->param;
3227 	struct hci_conn *conn;
3228 
3229 	if (cp->addr.type == BDADDR_BREDR)
3230 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3231 					       &cp->addr.bdaddr);
3232 	else
3233 		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3234 					       le_addr_type(cp->addr.type));
3235 
3236 	if (!conn)
3237 		return -ENOTCONN;
3238 
3239 	/* Disregard any possible error since the likes of hci_abort_conn_sync
3240 	 * will clean up the connection no matter the error.
3241 	 */
3242 	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3243 
3244 	return 0;
3245 }
3246 
3247 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3248 		      u16 len)
3249 {
3250 	struct mgmt_cp_disconnect *cp = data;
3251 	struct mgmt_rp_disconnect rp;
3252 	struct mgmt_pending_cmd *cmd;
3253 	int err;
3254 
3255 	bt_dev_dbg(hdev, "sock %p", sk);
3256 
3257 	memset(&rp, 0, sizeof(rp));
3258 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3259 	rp.addr.type = cp->addr.type;
3260 
3261 	if (!bdaddr_type_is_valid(cp->addr.type))
3262 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3263 					 MGMT_STATUS_INVALID_PARAMS,
3264 					 &rp, sizeof(rp));
3265 
3266 	hci_dev_lock(hdev);
3267 
3268 	if (!test_bit(HCI_UP, &hdev->flags)) {
3269 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3270 					MGMT_STATUS_NOT_POWERED, &rp,
3271 					sizeof(rp));
3272 		goto failed;
3273 	}
3274 
3275 	cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3276 	if (!cmd) {
3277 		err = -ENOMEM;
3278 		goto failed;
3279 	}
3280 
3281 	cmd->cmd_complete = generic_cmd_complete;
3282 
3283 	err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
3284 				 disconnect_complete);
3285 	if (err < 0)
3286 		mgmt_pending_free(cmd);
3287 
3288 failed:
3289 	hci_dev_unlock(hdev);
3290 	return err;
3291 }
3292 
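/* Map an HCI link type and address type pair onto the single address
 * type value used by the management interface.
 */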
3293 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3294 {
3295 	switch (link_type) {
3296 	case CIS_LINK:
3297 	case BIS_LINK:
3298 	case PA_LINK:
3299 	case LE_LINK:
3300 		switch (addr_type) {
3301 		case ADDR_LE_DEV_PUBLIC:
3302 			return BDADDR_LE_PUBLIC;
3303 
3304 		default:
3305 			/* Fallback to LE Random address type */
3306 			return BDADDR_LE_RANDOM;
3307 		}
3308 
3309 	default:
3310 		/* Fallback to BR/EDR type */
3311 		return BDADDR_BREDR;
3312 	}
3313 }
3314 
3315 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3316 			   u16 data_len)
3317 {
3318 	struct mgmt_rp_get_connections *rp;
3319 	struct hci_conn *c;
3320 	int err;
3321 	u16 i;
3322 
3323 	bt_dev_dbg(hdev, "sock %p", sk);
3324 
3325 	hci_dev_lock(hdev);
3326 
3327 	if (!hdev_is_powered(hdev)) {
3328 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3329 				      MGMT_STATUS_NOT_POWERED);
3330 		goto unlock;
3331 	}
3332 
3333 	i = 0;
3334 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
3335 		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3336 			i++;
3337 	}
3338 
3339 	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3340 	if (!rp) {
3341 		err = -ENOMEM;
3342 		goto unlock;
3343 	}
3344 
3345 	i = 0;
3346 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
3347 		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3348 			continue;
3349 		bacpy(&rp->addr[i].bdaddr, &c->dst);
3350 		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3351 		if (c->type == SCO_LINK || c->type == ESCO_LINK)
3352 			continue;
3353 		i++;
3354 	}
3355 
3356 	rp->conn_count = cpu_to_le16(i);
3357 
3358 	/* Recalculate length in case of filtered SCO connections, etc. */
3359 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3360 				struct_size(rp, addr, i));
3361 
3362 	kfree(rp);
3363 
3364 unlock:
3365 	hci_dev_unlock(hdev);
3366 	return err;
3367 }
3368 
3369 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3370 				   struct mgmt_cp_pin_code_neg_reply *cp)
3371 {
3372 	struct mgmt_pending_cmd *cmd;
3373 	int err;
3374 
3375 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3376 			       sizeof(*cp));
3377 	if (!cmd)
3378 		return -ENOMEM;
3379 
3380 	cmd->cmd_complete = addr_cmd_complete;
3381 
3382 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3383 			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3384 	if (err < 0)
3385 		mgmt_pending_remove(cmd);
3386 
3387 	return err;
3388 }
3389 
3390 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3391 			  u16 len)
3392 {
3393 	struct hci_conn *conn;
3394 	struct mgmt_cp_pin_code_reply *cp = data;
3395 	struct hci_cp_pin_code_reply reply;
3396 	struct mgmt_pending_cmd *cmd;
3397 	int err;
3398 
3399 	bt_dev_dbg(hdev, "sock %p", sk);
3400 
3401 	hci_dev_lock(hdev);
3402 
3403 	if (!hdev_is_powered(hdev)) {
3404 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3405 				      MGMT_STATUS_NOT_POWERED);
3406 		goto failed;
3407 	}
3408 
3409 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3410 	if (!conn) {
3411 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3412 				      MGMT_STATUS_NOT_CONNECTED);
3413 		goto failed;
3414 	}
3415 
3416 	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3417 		struct mgmt_cp_pin_code_neg_reply ncp;
3418 
3419 		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3420 
3421 		bt_dev_err(hdev, "PIN code is not 16 bytes long");
3422 
3423 		err = send_pin_code_neg_reply(sk, hdev, &ncp);
3424 		if (err >= 0)
3425 			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3426 					      MGMT_STATUS_INVALID_PARAMS);
3427 
3428 		goto failed;
3429 	}
3430 
3431 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3432 	if (!cmd) {
3433 		err = -ENOMEM;
3434 		goto failed;
3435 	}
3436 
3437 	cmd->cmd_complete = addr_cmd_complete;
3438 
3439 	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3440 	reply.pin_len = cp->pin_len;
3441 	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3442 
3443 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3444 	if (err < 0)
3445 		mgmt_pending_remove(cmd);
3446 
3447 failed:
3448 	hci_dev_unlock(hdev);
3449 	return err;
3450 }
3451 
3452 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3453 			     u16 len)
3454 {
3455 	struct mgmt_cp_set_io_capability *cp = data;
3456 
3457 	bt_dev_dbg(hdev, "sock %p", sk);
3458 
3459 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3460 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3461 				       MGMT_STATUS_INVALID_PARAMS);
3462 
3463 	hci_dev_lock(hdev);
3464 
3465 	hdev->io_capability = cp->io_capability;
3466 
3467 	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3468 
3469 	hci_dev_unlock(hdev);
3470 
3471 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3472 				 NULL, 0);
3473 }
3474 
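/* Find the pending Pair Device command, if any, that owns the given
 * connection.
 */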
3475 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3476 {
3477 	struct hci_dev *hdev = conn->hdev;
3478 	struct mgmt_pending_cmd *cmd;
3479 
3480 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3481 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3482 			continue;
3483 
3484 		if (cmd->user_data != conn)
3485 			continue;
3486 
3487 		return cmd;
3488 	}
3489 
3490 	return NULL;
3491 }
3492 
3493 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3494 {
3495 	struct mgmt_rp_pair_device rp;
3496 	struct hci_conn *conn = cmd->user_data;
3497 	int err;
3498 
3499 	bacpy(&rp.addr.bdaddr, &conn->dst);
3500 	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3501 
3502 	err = mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_PAIR_DEVICE,
3503 				status, &rp, sizeof(rp));
3504 
3505 	/* So we don't get further callbacks for this connection */
3506 	conn->connect_cfm_cb = NULL;
3507 	conn->security_cfm_cb = NULL;
3508 	conn->disconn_cfm_cb = NULL;
3509 
3510 	hci_conn_drop(conn);
3511 
3512 	/* The device is paired so there is no need to remove
3513 	 * its connection parameters anymore.
3514 	 */
3515 	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3516 
3517 	hci_conn_put(conn);
3518 
3519 	return err;
3520 }
3521 
3522 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3523 {
3524 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3525 	struct mgmt_pending_cmd *cmd;
3526 
3527 	cmd = find_pairing(conn);
3528 	if (cmd) {
3529 		cmd->cmd_complete(cmd, status);
3530 		mgmt_pending_remove(cmd);
3531 	}
3532 }
3533 
3534 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3535 {
3536 	struct mgmt_pending_cmd *cmd;
3537 
3538 	BT_DBG("status %u", status);
3539 
3540 	cmd = find_pairing(conn);
3541 	if (!cmd) {
3542 		BT_DBG("Unable to find a pending command");
3543 		return;
3544 	}
3545 
3546 	cmd->cmd_complete(cmd, mgmt_status(status));
3547 	mgmt_pending_remove(cmd);
3548 }
3549 
3550 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3551 {
3552 	struct mgmt_pending_cmd *cmd;
3553 
3554 	BT_DBG("status %u", status);
3555 
3556 	if (!status)
3557 		return;
3558 
3559 	cmd = find_pairing(conn);
3560 	if (!cmd) {
3561 		BT_DBG("Unable to find a pending command");
3562 		return;
3563 	}
3564 
3565 	cmd->cmd_complete(cmd, mgmt_status(status));
3566 	mgmt_pending_remove(cmd);
3567 }
3568 
3569 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3570 		       u16 len)
3571 {
3572 	struct mgmt_cp_pair_device *cp = data;
3573 	struct mgmt_rp_pair_device rp;
3574 	struct mgmt_pending_cmd *cmd;
3575 	u8 sec_level, auth_type;
3576 	struct hci_conn *conn;
3577 	int err;
3578 
3579 	bt_dev_dbg(hdev, "sock %p", sk);
3580 
3581 	memset(&rp, 0, sizeof(rp));
3582 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3583 	rp.addr.type = cp->addr.type;
3584 
3585 	if (!bdaddr_type_is_valid(cp->addr.type))
3586 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3587 					 MGMT_STATUS_INVALID_PARAMS,
3588 					 &rp, sizeof(rp));
3589 
3590 	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3591 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3592 					 MGMT_STATUS_INVALID_PARAMS,
3593 					 &rp, sizeof(rp));
3594 
3595 	hci_dev_lock(hdev);
3596 
3597 	if (!hdev_is_powered(hdev)) {
3598 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3599 					MGMT_STATUS_NOT_POWERED, &rp,
3600 					sizeof(rp));
3601 		goto unlock;
3602 	}
3603 
3604 	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3605 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3606 					MGMT_STATUS_ALREADY_PAIRED, &rp,
3607 					sizeof(rp));
3608 		goto unlock;
3609 	}
3610 
3611 	sec_level = BT_SECURITY_MEDIUM;
3612 	auth_type = HCI_AT_DEDICATED_BONDING;
3613 
3614 	if (cp->addr.type == BDADDR_BREDR) {
3615 		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3616 				       auth_type, CONN_REASON_PAIR_DEVICE,
3617 				       HCI_ACL_CONN_TIMEOUT);
3618 	} else {
3619 		u8 addr_type = le_addr_type(cp->addr.type);
3620 		struct hci_conn_params *p;
3621 
3622 		/* When pairing a new device, it is expected that the device
3623 		 * is remembered for future connections. Adding the connection
3624 		 * parameter information ahead of time allows tracking
3625 		 * of the peripheral preferred values and will speed up any
3626 		 * further connection establishment.
3627 		 *
3628 		 * If connection parameters already exist, then they
3629 		 * will be kept and this function does nothing.
3630 		 */
3631 		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3632 		if (!p) {
3633 			err = -EIO;
3634 			goto unlock;
3635 		}
3636 
3637 		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3638 			p->auto_connect = HCI_AUTO_CONN_DISABLED;
3639 
3640 		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3641 					   sec_level, HCI_LE_CONN_TIMEOUT,
3642 					   CONN_REASON_PAIR_DEVICE);
3643 	}
3644 
3645 	if (IS_ERR(conn)) {
3646 		int status;
3647 
3648 		if (PTR_ERR(conn) == -EBUSY)
3649 			status = MGMT_STATUS_BUSY;
3650 		else if (PTR_ERR(conn) == -EOPNOTSUPP)
3651 			status = MGMT_STATUS_NOT_SUPPORTED;
3652 		else if (PTR_ERR(conn) == -ECONNREFUSED)
3653 			status = MGMT_STATUS_REJECTED;
3654 		else
3655 			status = MGMT_STATUS_CONNECT_FAILED;
3656 
3657 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3658 					status, &rp, sizeof(rp));
3659 		goto unlock;
3660 	}
3661 
3662 	if (conn->connect_cfm_cb) {
3663 		hci_conn_drop(conn);
3664 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3665 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
3666 		goto unlock;
3667 	}
3668 
3669 	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3670 	if (!cmd) {
3671 		err = -ENOMEM;
3672 		hci_conn_drop(conn);
3673 		goto unlock;
3674 	}
3675 
3676 	cmd->cmd_complete = pairing_complete;
3677 
3678 	/* For LE, just connecting isn't proof that the pairing finished */
3679 	if (cp->addr.type == BDADDR_BREDR) {
3680 		conn->connect_cfm_cb = pairing_complete_cb;
3681 		conn->security_cfm_cb = pairing_complete_cb;
3682 		conn->disconn_cfm_cb = pairing_complete_cb;
3683 	} else {
3684 		conn->connect_cfm_cb = le_pairing_complete_cb;
3685 		conn->security_cfm_cb = le_pairing_complete_cb;
3686 		conn->disconn_cfm_cb = le_pairing_complete_cb;
3687 	}
3688 
3689 	conn->io_capability = cp->io_cap;
3690 	cmd->user_data = hci_conn_get(conn);
3691 
3692 	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3693 	    hci_conn_security(conn, sec_level, auth_type, true)) {
3694 		cmd->cmd_complete(cmd, 0);
3695 		mgmt_pending_remove(cmd);
3696 	}
3697 
3698 	err = 0;
3699 
3700 unlock:
3701 	hci_dev_unlock(hdev);
3702 	return err;
3703 }
3704 
3705 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3706 			      u16 len)
3707 {
3708 	struct mgmt_addr_info *addr = data;
3709 	struct mgmt_pending_cmd *cmd;
3710 	struct hci_conn *conn;
3711 	int err;
3712 
3713 	bt_dev_dbg(hdev, "sock %p", sk);
3714 
3715 	hci_dev_lock(hdev);
3716 
3717 	if (!hdev_is_powered(hdev)) {
3718 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3719 				      MGMT_STATUS_NOT_POWERED);
3720 		goto unlock;
3721 	}
3722 
3723 	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3724 	if (!cmd) {
3725 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3726 				      MGMT_STATUS_INVALID_PARAMS);
3727 		goto unlock;
3728 	}
3729 
3730 	conn = cmd->user_data;
3731 
3732 	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3733 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3734 				      MGMT_STATUS_INVALID_PARAMS);
3735 		goto unlock;
3736 	}
3737 
3738 	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3739 	mgmt_pending_remove(cmd);
3740 
3741 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3742 				addr, sizeof(*addr));
3743 
3744 	/* Since the user doesn't want to proceed with the connection, abort
3745 	 * any ongoing pairing and then terminate the link if it was created
3746 	 * because of the pair device action.
3747 	 */
3748 	if (addr->type == BDADDR_BREDR)
3749 		hci_remove_link_key(hdev, &addr->bdaddr);
3750 	else
3751 		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3752 					      le_addr_type(addr->type));
3753 
3754 	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3755 		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3756 
3757 unlock:
3758 	hci_dev_unlock(hdev);
3759 	return err;
3760 }
3761 
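/* Common helper for all user confirmation and passkey (negative)
 * replies: LE responses are routed through SMP, while BR/EDR responses
 * are sent to the controller as HCI commands.
 */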
3762 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3763 			     struct mgmt_addr_info *addr, u16 mgmt_op,
3764 			     u16 hci_op, __le32 passkey)
3765 {
3766 	struct mgmt_pending_cmd *cmd;
3767 	struct hci_conn *conn;
3768 	int err;
3769 
3770 	hci_dev_lock(hdev);
3771 
3772 	if (!hdev_is_powered(hdev)) {
3773 		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3774 					MGMT_STATUS_NOT_POWERED, addr,
3775 					sizeof(*addr));
3776 		goto done;
3777 	}
3778 
3779 	if (addr->type == BDADDR_BREDR)
3780 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3781 	else
3782 		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3783 					       le_addr_type(addr->type));
3784 
3785 	if (!conn) {
3786 		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3787 					MGMT_STATUS_NOT_CONNECTED, addr,
3788 					sizeof(*addr));
3789 		goto done;
3790 	}
3791 
3792 	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3793 		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3794 		if (!err)
3795 			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3796 						MGMT_STATUS_SUCCESS, addr,
3797 						sizeof(*addr));
3798 		else
3799 			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3800 						MGMT_STATUS_FAILED, addr,
3801 						sizeof(*addr));
3802 
3803 		goto done;
3804 	}
3805 
3806 	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3807 	if (!cmd) {
3808 		err = -ENOMEM;
3809 		goto done;
3810 	}
3811 
3812 	cmd->cmd_complete = addr_cmd_complete;
3813 
3814 	/* Continue with pairing via HCI */
3815 	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3816 		struct hci_cp_user_passkey_reply cp;
3817 
3818 		bacpy(&cp.bdaddr, &addr->bdaddr);
3819 		cp.passkey = passkey;
3820 		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3821 	} else
3822 		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3823 				   &addr->bdaddr);
3824 
3825 	if (err < 0)
3826 		mgmt_pending_remove(cmd);
3827 
3828 done:
3829 	hci_dev_unlock(hdev);
3830 	return err;
3831 }
3832 
3833 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3834 			      void *data, u16 len)
3835 {
3836 	struct mgmt_cp_pin_code_neg_reply *cp = data;
3837 
3838 	bt_dev_dbg(hdev, "sock %p", sk);
3839 
3840 	return user_pairing_resp(sk, hdev, &cp->addr,
3841 				MGMT_OP_PIN_CODE_NEG_REPLY,
3842 				HCI_OP_PIN_CODE_NEG_REPLY, 0);
3843 }
3844 
3845 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3846 			      u16 len)
3847 {
3848 	struct mgmt_cp_user_confirm_reply *cp = data;
3849 
3850 	bt_dev_dbg(hdev, "sock %p", sk);
3851 
3852 	if (len != sizeof(*cp))
3853 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3854 				       MGMT_STATUS_INVALID_PARAMS);
3855 
3856 	return user_pairing_resp(sk, hdev, &cp->addr,
3857 				 MGMT_OP_USER_CONFIRM_REPLY,
3858 				 HCI_OP_USER_CONFIRM_REPLY, 0);
3859 }
3860 
3861 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3862 				  void *data, u16 len)
3863 {
3864 	struct mgmt_cp_user_confirm_neg_reply *cp = data;
3865 
3866 	bt_dev_dbg(hdev, "sock %p", sk);
3867 
3868 	return user_pairing_resp(sk, hdev, &cp->addr,
3869 				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3870 				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3871 }
3872 
3873 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3874 			      u16 len)
3875 {
3876 	struct mgmt_cp_user_passkey_reply *cp = data;
3877 
3878 	bt_dev_dbg(hdev, "sock %p", sk);
3879 
3880 	return user_pairing_resp(sk, hdev, &cp->addr,
3881 				 MGMT_OP_USER_PASSKEY_REPLY,
3882 				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3883 }
3884 
3885 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3886 				  void *data, u16 len)
3887 {
3888 	struct mgmt_cp_user_passkey_neg_reply *cp = data;
3889 
3890 	bt_dev_dbg(hdev, "sock %p", sk);
3891 
3892 	return user_pairing_resp(sk, hdev, &cp->addr,
3893 				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3894 				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3895 }
3896 
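/* Expire the current advertising instance if it uses any of the given
 * flags and schedule the next instance in its place.
 */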
3897 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3898 {
3899 	struct adv_info *adv_instance;
3900 
3901 	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3902 	if (!adv_instance)
3903 		return 0;
3904 
3905 	/* Stop if the current instance doesn't need to be changed */
3906 	if (!(adv_instance->flags & flags))
3907 		return 0;
3908 
3909 	cancel_adv_timeout(hdev);
3910 
3911 	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3912 	if (!adv_instance)
3913 		return 0;
3914 
3915 	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3916 
3917 	return 0;
3918 }
3919 
3920 static int name_changed_sync(struct hci_dev *hdev, void *data)
3921 {
3922 	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3923 }
3924 
3925 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3926 {
3927 	struct mgmt_pending_cmd *cmd = data;
3928 	struct mgmt_cp_set_local_name *cp;
3929 	u8 status = mgmt_status(err);
3930 
3931 	bt_dev_dbg(hdev, "err %d", err);
3932 
3933 	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
3934 		return;
3935 
3936 	cp = cmd->param;
3937 
3938 	if (status) {
3939 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3940 				status);
3941 	} else {
3942 		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3943 				  cp, sizeof(*cp));
3944 
3945 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3946 			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3947 	}
3948 
3949 	mgmt_pending_free(cmd);
3950 }
3951 
3952 static int set_name_sync(struct hci_dev *hdev, void *data)
3953 {
3954 	struct mgmt_pending_cmd *cmd = data;
3955 	struct mgmt_cp_set_local_name cp;
3956 
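	/* Copy the name out while holding mgmt_pending_lock so a
	 * concurrent cancellation cannot free cmd->param mid-read.
	 */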
3957 	mutex_lock(&hdev->mgmt_pending_lock);
3958 
3959 	if (!__mgmt_pending_listed(hdev, cmd)) {
3960 		mutex_unlock(&hdev->mgmt_pending_lock);
3961 		return -ECANCELED;
3962 	}
3963 
3964 	memcpy(&cp, cmd->param, sizeof(cp));
3965 
3966 	mutex_unlock(&hdev->mgmt_pending_lock);
3967 
3968 	if (lmp_bredr_capable(hdev)) {
3969 		hci_update_name_sync(hdev, cp.name);
3970 		hci_update_eir_sync(hdev);
3971 	}
3972 
3973 	/* The name is stored in the scan response data, so there is
3974 	 * no need to update the advertising data here.
3975 	 */
3976 	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3977 		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3978 
3979 	return 0;
3980 }
3981 
3982 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3983 			  u16 len)
3984 {
3985 	struct mgmt_cp_set_local_name *cp = data;
3986 	struct mgmt_pending_cmd *cmd;
3987 	int err;
3988 
3989 	bt_dev_dbg(hdev, "sock %p", sk);
3990 
3991 	hci_dev_lock(hdev);
3992 
3993 	/* If the old values are the same as the new ones just return a
3994 	 * direct command complete event.
3995 	 */
3996 	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3997 	    !memcmp(hdev->short_name, cp->short_name,
3998 		    sizeof(hdev->short_name))) {
3999 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
4000 					data, len);
4001 		goto failed;
4002 	}
4003 
4004 	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
4005 
4006 	if (!hdev_is_powered(hdev)) {
4007 		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
4008 
4009 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
4010 					data, len);
4011 		if (err < 0)
4012 			goto failed;
4013 
4014 		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
4015 					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
4016 		ext_info_changed(hdev, sk);
4017 
4018 		goto failed;
4019 	}
4020 
4021 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
4022 	if (!cmd)
4023 		err = -ENOMEM;
4024 	else
4025 		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
4026 					 set_name_complete);
4027 
4028 	if (err < 0) {
4029 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
4030 				      MGMT_STATUS_FAILED);
4031 
4032 		if (cmd)
4033 			mgmt_pending_remove(cmd);
4034 
4035 		goto failed;
4036 	}
4037 
4038 	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
4039 
4040 failed:
4041 	hci_dev_unlock(hdev);
4042 	return err;
4043 }
4044 
4045 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
4046 {
4047 	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
4048 }
4049 
4050 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
4051 			  u16 len)
4052 {
4053 	struct mgmt_cp_set_appearance *cp = data;
4054 	u16 appearance;
4055 	int err;
4056 
4057 	bt_dev_dbg(hdev, "sock %p", sk);
4058 
4059 	if (!lmp_le_capable(hdev))
4060 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
4061 				       MGMT_STATUS_NOT_SUPPORTED);
4062 
4063 	appearance = le16_to_cpu(cp->appearance);
4064 
4065 	hci_dev_lock(hdev);
4066 
4067 	if (hdev->appearance != appearance) {
4068 		hdev->appearance = appearance;
4069 
4070 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
4071 			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
4072 					   NULL);
4073 
4074 		ext_info_changed(hdev, sk);
4075 	}
4076 
4077 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
4078 				0);
4079 
4080 	hci_dev_unlock(hdev);
4081 
4082 	return err;
4083 }
4084 
4085 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4086 				 void *data, u16 len)
4087 {
4088 	struct mgmt_rp_get_phy_configuration rp;
4089 
4090 	bt_dev_dbg(hdev, "sock %p", sk);
4091 
4092 	hci_dev_lock(hdev);
4093 
4094 	memset(&rp, 0, sizeof(rp));
4095 
4096 	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
4097 	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
4098 	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
4099 
4100 	hci_dev_unlock(hdev);
4101 
4102 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
4103 				 &rp, sizeof(rp));
4104 }
4105 
4106 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
4107 {
4108 	struct mgmt_ev_phy_configuration_changed ev;
4109 
4110 	memset(&ev, 0, sizeof(ev));
4111 
4112 	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
4113 
4114 	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
4115 			  sizeof(ev), skip);
4116 }
4117 
4118 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
4119 {
4120 	struct mgmt_pending_cmd *cmd = data;
4121 	struct sk_buff *skb;
4122 	u8 status = mgmt_status(err);
4123 
4124 	skb = cmd->skb;
4125 
4126 	if (!status) {
4127 		if (!skb)
4128 			status = MGMT_STATUS_FAILED;
4129 		else if (IS_ERR(skb))
4130 			status = mgmt_status(PTR_ERR(skb));
4131 		else
4132 			status = mgmt_status(skb->data[0]);
4133 	}
4134 
4135 	bt_dev_dbg(hdev, "status %d", status);
4136 
4137 	if (status) {
4138 		mgmt_cmd_status(cmd->sk, hdev->id,
4139 				MGMT_OP_SET_PHY_CONFIGURATION, status);
4140 	} else {
4141 		mgmt_cmd_complete(cmd->sk, hdev->id,
4142 				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
4143 				  NULL, 0);
4144 
4145 		mgmt_phy_configuration_changed(hdev, cmd->sk);
4146 	}
4147 
4148 	if (skb && !IS_ERR(skb))
4149 		kfree_skb(skb);
4150 
4151 	mgmt_pending_free(cmd);
4152 }
4153 
4154 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4155 {
4156 	struct mgmt_pending_cmd *cmd = data;
4157 	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4158 	struct hci_cp_le_set_default_phy cp_phy;
4159 	u32 selected_phys;
4160 
4161 	selected_phys = __le32_to_cpu(cp->selected_phys);
4162 
4163 	memset(&cp_phy, 0, sizeof(cp_phy));
4164 
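	/* Per the LE Set Default PHY command, all_phys bit 0 (TX) and
	 * bit 1 (RX) signal that the host has no preference for that
	 * direction; set them when no PHY of the direction is selected.
	 */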
4165 	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4166 		cp_phy.all_phys |= 0x01;
4167 
4168 	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4169 		cp_phy.all_phys |= 0x02;
4170 
4171 	if (selected_phys & MGMT_PHY_LE_1M_TX)
4172 		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4173 
4174 	if (selected_phys & MGMT_PHY_LE_2M_TX)
4175 		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4176 
4177 	if (selected_phys & MGMT_PHY_LE_CODED_TX)
4178 		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4179 
4180 	if (selected_phys & MGMT_PHY_LE_1M_RX)
4181 		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4182 
4183 	if (selected_phys & MGMT_PHY_LE_2M_RX)
4184 		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4185 
4186 	if (selected_phys & MGMT_PHY_LE_CODED_RX)
4187 		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4188 
4189 	cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4190 				   sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4191 
4192 	return 0;
4193 }
4194 
4195 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4196 				 void *data, u16 len)
4197 {
4198 	struct mgmt_cp_set_phy_configuration *cp = data;
4199 	struct mgmt_pending_cmd *cmd;
4200 	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
4201 	u16 pkt_type = (HCI_DH1 | HCI_DM1);
4202 	bool changed = false;
4203 	int err;
4204 
4205 	bt_dev_dbg(hdev, "sock %p", sk);
4206 
4207 	configurable_phys = get_configurable_phys(hdev);
4208 	supported_phys = get_supported_phys(hdev);
4209 	selected_phys = __le32_to_cpu(cp->selected_phys);
4210 
4211 	if (selected_phys & ~supported_phys)
4212 		return mgmt_cmd_status(sk, hdev->id,
4213 				       MGMT_OP_SET_PHY_CONFIGURATION,
4214 				       MGMT_STATUS_INVALID_PARAMS);
4215 
4216 	unconfigure_phys = supported_phys & ~configurable_phys;
4217 
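	/* PHYs that are not configurable are always enabled, so all of
	 * them must remain part of the selection.
	 */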
4218 	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4219 		return mgmt_cmd_status(sk, hdev->id,
4220 				       MGMT_OP_SET_PHY_CONFIGURATION,
4221 				       MGMT_STATUS_INVALID_PARAMS);
4222 
4223 	if (selected_phys == get_selected_phys(hdev))
4224 		return mgmt_cmd_complete(sk, hdev->id,
4225 					 MGMT_OP_SET_PHY_CONFIGURATION,
4226 					 0, NULL, 0);
4227 
4228 	hci_dev_lock(hdev);
4229 
4230 	if (!hdev_is_powered(hdev)) {
4231 		err = mgmt_cmd_status(sk, hdev->id,
4232 				      MGMT_OP_SET_PHY_CONFIGURATION,
4233 				      MGMT_STATUS_REJECTED);
4234 		goto unlock;
4235 	}
4236 
4237 	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4238 		err = mgmt_cmd_status(sk, hdev->id,
4239 				      MGMT_OP_SET_PHY_CONFIGURATION,
4240 				      MGMT_STATUS_BUSY);
4241 		goto unlock;
4242 	}
4243 
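	/* Map the BR/EDR selection onto the ACL packet type: the basic
	 * rate slot bits enable packet types directly, while the EDR
	 * bits are "shall not use" bits and therefore use inverted
	 * logic.
	 */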
4244 	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4245 		pkt_type |= (HCI_DH3 | HCI_DM3);
4246 	else
4247 		pkt_type &= ~(HCI_DH3 | HCI_DM3);
4248 
4249 	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4250 		pkt_type |= (HCI_DH5 | HCI_DM5);
4251 	else
4252 		pkt_type &= ~(HCI_DH5 | HCI_DM5);
4253 
4254 	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4255 		pkt_type &= ~HCI_2DH1;
4256 	else
4257 		pkt_type |= HCI_2DH1;
4258 
4259 	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4260 		pkt_type &= ~HCI_2DH3;
4261 	else
4262 		pkt_type |= HCI_2DH3;
4263 
4264 	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4265 		pkt_type &= ~HCI_2DH5;
4266 	else
4267 		pkt_type |= HCI_2DH5;
4268 
4269 	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4270 		pkt_type &= ~HCI_3DH1;
4271 	else
4272 		pkt_type |= HCI_3DH1;
4273 
4274 	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4275 		pkt_type &= ~HCI_3DH3;
4276 	else
4277 		pkt_type |= HCI_3DH3;
4278 
4279 	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4280 		pkt_type &= ~HCI_3DH5;
4281 	else
4282 		pkt_type |= HCI_3DH5;
4283 
4284 	if (pkt_type != hdev->pkt_type) {
4285 		hdev->pkt_type = pkt_type;
4286 		changed = true;
4287 	}
4288 
4289 	if ((selected_phys & MGMT_PHY_LE_MASK) ==
4290 	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4291 		if (changed)
4292 			mgmt_phy_configuration_changed(hdev, sk);
4293 
4294 		err = mgmt_cmd_complete(sk, hdev->id,
4295 					MGMT_OP_SET_PHY_CONFIGURATION,
4296 					0, NULL, 0);
4297 
4298 		goto unlock;
4299 	}
4300 
4301 	cmd = mgmt_pending_new(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4302 			       len);
4303 	if (!cmd)
4304 		err = -ENOMEM;
4305 	else
4306 		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4307 					 set_default_phy_complete);
4308 
4309 	if (err < 0) {
4310 		err = mgmt_cmd_status(sk, hdev->id,
4311 				      MGMT_OP_SET_PHY_CONFIGURATION,
4312 				      MGMT_STATUS_FAILED);
4313 
4314 		if (cmd)
4315 			mgmt_pending_remove(cmd);
4316 	}
4317 
4318 unlock:
4319 	hci_dev_unlock(hdev);
4320 
4321 	return err;
4322 }
4323 
4324 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4325 			    u16 len)
4326 {
4327 	int err = MGMT_STATUS_SUCCESS;
4328 	struct mgmt_cp_set_blocked_keys *keys = data;
4329 	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4330 				   sizeof(struct mgmt_blocked_key_info));
4331 	u16 key_count, expected_len;
4332 	int i;
4333 
4334 	bt_dev_dbg(hdev, "sock %p", sk);
4335 
4336 	key_count = __le16_to_cpu(keys->key_count);
4337 	if (key_count > max_key_count) {
4338 		bt_dev_err(hdev, "too big key_count value %u", key_count);
4339 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4340 				       MGMT_STATUS_INVALID_PARAMS);
4341 	}
4342 
4343 	expected_len = struct_size(keys, keys, key_count);
4344 	if (expected_len != len) {
4345 		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4346 			   expected_len, len);
4347 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4348 				       MGMT_STATUS_INVALID_PARAMS);
4349 	}
4350 
4351 	hci_dev_lock(hdev);
4352 
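	/* Replace the entire blocked-key list with the one supplied by
	 * userspace.
	 */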
4353 	hci_blocked_keys_clear(hdev);
4354 
4355 	for (i = 0; i < key_count; ++i) {
4356 		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4357 
4358 		if (!b) {
4359 			err = MGMT_STATUS_NO_RESOURCES;
4360 			break;
4361 		}
4362 
4363 		b->type = keys->keys[i].type;
4364 		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4365 		list_add_rcu(&b->list, &hdev->blocked_keys);
4366 	}
4367 	hci_dev_unlock(hdev);
4368 
4369 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4370 				err, NULL, 0);
4371 }
4372 
4373 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4374 			       void *data, u16 len)
4375 {
4376 	struct mgmt_mode *cp = data;
4377 	int err;
4378 	bool changed = false;
4379 
4380 	bt_dev_dbg(hdev, "sock %p", sk);
4381 
4382 	if (!hci_test_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED))
4383 		return mgmt_cmd_status(sk, hdev->id,
4384 				       MGMT_OP_SET_WIDEBAND_SPEECH,
4385 				       MGMT_STATUS_NOT_SUPPORTED);
4386 
4387 	if (cp->val != 0x00 && cp->val != 0x01)
4388 		return mgmt_cmd_status(sk, hdev->id,
4389 				       MGMT_OP_SET_WIDEBAND_SPEECH,
4390 				       MGMT_STATUS_INVALID_PARAMS);
4391 
4392 	hci_dev_lock(hdev);
4393 
4394 	if (hdev_is_powered(hdev) &&
4395 	    !!cp->val != hci_dev_test_flag(hdev,
4396 					   HCI_WIDEBAND_SPEECH_ENABLED)) {
4397 		err = mgmt_cmd_status(sk, hdev->id,
4398 				      MGMT_OP_SET_WIDEBAND_SPEECH,
4399 				      MGMT_STATUS_REJECTED);
4400 		goto unlock;
4401 	}
4402 
4403 	if (cp->val)
4404 		changed = !hci_dev_test_and_set_flag(hdev,
4405 						   HCI_WIDEBAND_SPEECH_ENABLED);
4406 	else
4407 		changed = hci_dev_test_and_clear_flag(hdev,
4408 						   HCI_WIDEBAND_SPEECH_ENABLED);
4409 
4410 	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
4411 	if (err < 0)
4412 		goto unlock;
4413 
4414 	if (changed)
4415 		err = new_settings(hdev, sk);
4416 
4417 unlock:
4418 	hci_dev_unlock(hdev);
4419 	return err;
4420 }
4421 
4422 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4423 			       void *data, u16 data_len)
4424 {
4425 	char buf[20];
4426 	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4427 	u16 cap_len = 0;
4428 	u8 flags = 0;
4429 	u8 tx_power_range[2];
4430 
4431 	bt_dev_dbg(hdev, "sock %p", sk);
4432 
4433 	memset(&buf, 0, sizeof(buf));
4434 
4435 	hci_dev_lock(hdev);
4436 
4437 	/* When the Read Simple Pairing Options command is supported,
4438 	 * remote public key validation is supported.
4439 	 *
4440 	 * Alternatively, when Microsoft extensions are available, they can
4441 	 * indicate support for public key validation as well.
4442 	 */
4443 	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4444 		flags |= 0x01;	/* Remote public key validation (BR/EDR) */
4445 
4446 	flags |= 0x02;		/* Remote public key validation (LE) */
4447 
4448 	/* When the Read Encryption Key Size command is supported, then the
4449 	 * encryption key size is enforced.
4450 	 */
4451 	if (hdev->commands[20] & 0x10)
4452 		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */
4453 
4454 	flags |= 0x08;		/* Encryption key size enforcement (LE) */
4455 
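	/* Capabilities are encoded as EIR-style {length, type, value}
	 * triplets appended to rp->cap.
	 */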
4456 	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4457 				  &flags, 1);
4458 
4459 	/* When the Read Simple Pairing Options command is supported, the
4460 	 * maximum encryption key size information is also provided.
4461 	 */
4462 	if (hdev->commands[41] & 0x08)
4463 		cap_len = eir_append_le16(rp->cap, cap_len,
4464 					  MGMT_CAP_MAX_ENC_KEY_SIZE,
4465 					  hdev->max_enc_key_size);
4466 
4467 	cap_len = eir_append_le16(rp->cap, cap_len,
4468 				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4469 				  SMP_MAX_ENC_KEY_SIZE);
4470 
4471 	/* Append the min/max LE tx power parameters if we were able to fetch
4472 	 * them from the controller.
4473 	 */
4474 	if (hdev->commands[38] & 0x80) {
4475 		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4476 		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4477 		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4478 					  tx_power_range, 2);
4479 	}
4480 
4481 	rp->cap_len = cpu_to_le16(cap_len);
4482 
4483 	hci_dev_unlock(hdev);
4484 
4485 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4486 				 rp, sizeof(*rp) + cap_len);
4487 }
4488 
4489 #ifdef CONFIG_BT_FEATURE_DEBUG
4490 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4491 static const u8 debug_uuid[16] = {
4492 	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4493 	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4494 };
4495 #endif
4496 
4497 /* 330859bc-7506-492d-9370-9a6f0614037f */
4498 static const u8 quality_report_uuid[16] = {
4499 	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4500 	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4501 };
4502 
4503 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4504 static const u8 offload_codecs_uuid[16] = {
4505 	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4506 	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4507 };
4508 
4509 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4510 static const u8 le_simultaneous_roles_uuid[16] = {
4511 	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4512 	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4513 };
4514 
4515 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4516 static const u8 iso_socket_uuid[16] = {
4517 	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4518 	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4519 };
4520 
4521 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4522 static const u8 mgmt_mesh_uuid[16] = {
4523 	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4524 	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
4525 };
4526 
4527 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4528 				  void *data, u16 data_len)
4529 {
4530 	struct mgmt_rp_read_exp_features_info *rp;
4531 	size_t len;
4532 	u16 idx = 0;
4533 	u32 flags;
4534 	int status;
4535 
4536 	bt_dev_dbg(hdev, "sock %p", sk);
4537 
4538 	/* Enough space for 7 features */
4539 	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4540 	rp = kzalloc(len, GFP_KERNEL);
4541 	if (!rp)
4542 		return -ENOMEM;
4543 
4544 #ifdef CONFIG_BT_FEATURE_DEBUG
4545 	if (!hdev) {
4546 		flags = bt_dbg_get() ? BIT(0) : 0;
4547 
4548 		memcpy(rp->features[idx].uuid, debug_uuid, 16);
4549 		rp->features[idx].flags = cpu_to_le32(flags);
4550 		idx++;
4551 	}
4552 #endif
4553 
4554 	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4555 		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4556 			flags = BIT(0);
4557 		else
4558 			flags = 0;
4559 
4560 		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4561 		rp->features[idx].flags = cpu_to_le32(flags);
4562 		idx++;
4563 	}
4564 
4565 	if (hdev && (aosp_has_quality_report(hdev) ||
4566 		     hdev->set_quality_report)) {
4567 		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4568 			flags = BIT(0);
4569 		else
4570 			flags = 0;
4571 
4572 		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4573 		rp->features[idx].flags = cpu_to_le32(flags);
4574 		idx++;
4575 	}
4576 
4577 	if (hdev && hdev->get_data_path_id) {
4578 		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4579 			flags = BIT(0);
4580 		else
4581 			flags = 0;
4582 
4583 		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4584 		rp->features[idx].flags = cpu_to_le32(flags);
4585 		idx++;
4586 	}
4587 
4588 	if (IS_ENABLED(CONFIG_BT_LE)) {
4589 		flags = iso_inited() ? BIT(0) : 0;
4590 		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4591 		rp->features[idx].flags = cpu_to_le32(flags);
4592 		idx++;
4593 	}
4594 
4595 	if (hdev && lmp_le_capable(hdev)) {
4596 		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4597 			flags = BIT(0);
4598 		else
4599 			flags = 0;
4600 
4601 		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4602 		rp->features[idx].flags = cpu_to_le32(flags);
4603 		idx++;
4604 	}
4605 
4606 	rp->feature_count = cpu_to_le16(idx);
4607 
4608 	/* After reading the experimental features information, enable
4609 	 * the events to update the client on any future change.
4610 	 */
4611 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4612 
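	/* Each returned feature entry is 20 bytes: a 16 byte UUID
	 * followed by a 32 bit flags field.
	 */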
4613 	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4614 				   MGMT_OP_READ_EXP_FEATURES_INFO,
4615 				   0, rp, sizeof(*rp) + (20 * idx));
4616 
4617 	kfree(rp);
4618 	return status;
4619 }
4620 
4621 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4622 			       bool enabled, struct sock *skip)
4623 {
4624 	struct mgmt_ev_exp_feature_changed ev;
4625 
4626 	memset(&ev, 0, sizeof(ev));
4627 	memcpy(ev.uuid, uuid, 16);
4628 	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4629 
4630 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4631 				  &ev, sizeof(ev),
4632 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4633 }
4634 
4635 #define EXP_FEAT(_uuid, _set_func)	\
4636 {					\
4637 	.uuid = _uuid,			\
4638 	.set_func = _set_func,		\
4639 }
4640 
4641 /* The zero key UUID is special. Multiple exp features are set through it. */
4642 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4643 			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4644 {
4645 	struct mgmt_rp_set_exp_feature rp;
4646 
4647 	memset(rp.uuid, 0, 16);
4648 	rp.flags = cpu_to_le32(0);
4649 
4650 #ifdef CONFIG_BT_FEATURE_DEBUG
4651 	if (!hdev) {
4652 		bool changed = bt_dbg_get();
4653 
4654 		bt_dbg_set(false);
4655 
4656 		if (changed)
4657 			exp_feature_changed(NULL, ZERO_KEY, false, sk);
4658 	}
4659 #endif
4660 
4661 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4662 
4663 	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4664 				 MGMT_OP_SET_EXP_FEATURE, 0,
4665 				 &rp, sizeof(rp));
4666 }
4667 
4668 #ifdef CONFIG_BT_FEATURE_DEBUG
4669 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4670 			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4671 {
4672 	struct mgmt_rp_set_exp_feature rp;
4673 
4674 	bool val, changed;
4675 	int err;
4676 
4677 	/* Command requires the non-controller index */
4678 	if (hdev)
4679 		return mgmt_cmd_status(sk, hdev->id,
4680 				       MGMT_OP_SET_EXP_FEATURE,
4681 				       MGMT_STATUS_INVALID_INDEX);
4682 
4683 	/* Parameters are limited to a single octet */
4684 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4685 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4686 				       MGMT_OP_SET_EXP_FEATURE,
4687 				       MGMT_STATUS_INVALID_PARAMS);
4688 
4689 	/* Only boolean on/off is supported */
4690 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4691 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4692 				       MGMT_OP_SET_EXP_FEATURE,
4693 				       MGMT_STATUS_INVALID_PARAMS);
4694 
4695 	val = !!cp->param[0];
4696 	changed = val ? !bt_dbg_get() : bt_dbg_get();
4697 	bt_dbg_set(val);
4698 
4699 	memcpy(rp.uuid, debug_uuid, 16);
4700 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4701 
4702 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4703 
4704 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4705 				MGMT_OP_SET_EXP_FEATURE, 0,
4706 				&rp, sizeof(rp));
4707 
4708 	if (changed)
4709 		exp_feature_changed(hdev, debug_uuid, val, sk);
4710 
4711 	return err;
4712 }
4713 #endif
4714 
4715 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4716 			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4717 {
4718 	struct mgmt_rp_set_exp_feature rp;
4719 	bool val, changed;
4720 	int err;
4721 
4722 	/* Command requires the controller index */
4723 	if (!hdev)
4724 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4725 				       MGMT_OP_SET_EXP_FEATURE,
4726 				       MGMT_STATUS_INVALID_INDEX);
4727 
4728 	/* Parameters are limited to a single octet */
4729 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4730 		return mgmt_cmd_status(sk, hdev->id,
4731 				       MGMT_OP_SET_EXP_FEATURE,
4732 				       MGMT_STATUS_INVALID_PARAMS);
4733 
4734 	/* Only boolean on/off is supported */
4735 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4736 		return mgmt_cmd_status(sk, hdev->id,
4737 				       MGMT_OP_SET_EXP_FEATURE,
4738 				       MGMT_STATUS_INVALID_PARAMS);
4739 
4740 	val = !!cp->param[0];
4741 
4742 	if (val) {
4743 		changed = !hci_dev_test_and_set_flag(hdev,
4744 						     HCI_MESH_EXPERIMENTAL);
4745 	} else {
4746 		hci_dev_clear_flag(hdev, HCI_MESH);
4747 		changed = hci_dev_test_and_clear_flag(hdev,
4748 						      HCI_MESH_EXPERIMENTAL);
4749 	}
4750 
4751 	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4752 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4753 
4754 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4755 
4756 	err = mgmt_cmd_complete(sk, hdev->id,
4757 				MGMT_OP_SET_EXP_FEATURE, 0,
4758 				&rp, sizeof(rp));
4759 
4760 	if (changed)
4761 		exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4762 
4763 	return err;
4764 }
4765 
4766 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4767 				   struct mgmt_cp_set_exp_feature *cp,
4768 				   u16 data_len)
4769 {
4770 	struct mgmt_rp_set_exp_feature rp;
4771 	bool val, changed;
4772 	int err;
4773 
4774 	/* Command requires a valid controller index */
4775 	if (!hdev)
4776 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4777 				       MGMT_OP_SET_EXP_FEATURE,
4778 				       MGMT_STATUS_INVALID_INDEX);
4779 
4780 	/* Parameters are limited to a single octet */
4781 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4782 		return mgmt_cmd_status(sk, hdev->id,
4783 				       MGMT_OP_SET_EXP_FEATURE,
4784 				       MGMT_STATUS_INVALID_PARAMS);
4785 
4786 	/* Only boolean on/off is supported */
4787 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4788 		return mgmt_cmd_status(sk, hdev->id,
4789 				       MGMT_OP_SET_EXP_FEATURE,
4790 				       MGMT_STATUS_INVALID_PARAMS);
4791 
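	/* Serialize against other HCI requests while the quality report
	 * setting is being changed.
	 */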
4792 	hci_req_sync_lock(hdev);
4793 
4794 	val = !!cp->param[0];
4795 	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4796 
4797 	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4798 		err = mgmt_cmd_status(sk, hdev->id,
4799 				      MGMT_OP_SET_EXP_FEATURE,
4800 				      MGMT_STATUS_NOT_SUPPORTED);
4801 		goto unlock_quality_report;
4802 	}
4803 
4804 	if (changed) {
4805 		if (hdev->set_quality_report)
4806 			err = hdev->set_quality_report(hdev, val);
4807 		else
4808 			err = aosp_set_quality_report(hdev, val);
4809 
4810 		if (err) {
4811 			err = mgmt_cmd_status(sk, hdev->id,
4812 					      MGMT_OP_SET_EXP_FEATURE,
4813 					      MGMT_STATUS_FAILED);
4814 			goto unlock_quality_report;
4815 		}
4816 
4817 		if (val)
4818 			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4819 		else
4820 			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4821 	}
4822 
4823 	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4824 
4825 	memcpy(rp.uuid, quality_report_uuid, 16);
4826 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4827 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4828 
4829 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4830 				&rp, sizeof(rp));
4831 
4832 	if (changed)
4833 		exp_feature_changed(hdev, quality_report_uuid, val, sk);
4834 
4835 unlock_quality_report:
4836 	hci_req_sync_unlock(hdev);
4837 	return err;
4838 }
4839 
4840 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4841 				  struct mgmt_cp_set_exp_feature *cp,
4842 				  u16 data_len)
4843 {
4844 	bool val, changed;
4845 	int err;
4846 	struct mgmt_rp_set_exp_feature rp;
4847 
4848 	/* Command requires a valid controller index */
4849 	if (!hdev)
4850 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4851 				       MGMT_OP_SET_EXP_FEATURE,
4852 				       MGMT_STATUS_INVALID_INDEX);
4853 
4854 	/* Parameters are limited to a single octet */
4855 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4856 		return mgmt_cmd_status(sk, hdev->id,
4857 				       MGMT_OP_SET_EXP_FEATURE,
4858 				       MGMT_STATUS_INVALID_PARAMS);
4859 
4860 	/* Only boolean on/off is supported */
4861 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4862 		return mgmt_cmd_status(sk, hdev->id,
4863 				       MGMT_OP_SET_EXP_FEATURE,
4864 				       MGMT_STATUS_INVALID_PARAMS);
4865 
4866 	val = !!cp->param[0];
4867 	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4868 
4869 	if (!hdev->get_data_path_id) {
4870 		return mgmt_cmd_status(sk, hdev->id,
4871 				       MGMT_OP_SET_EXP_FEATURE,
4872 				       MGMT_STATUS_NOT_SUPPORTED);
4873 	}
4874 
4875 	if (changed) {
4876 		if (val)
4877 			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4878 		else
4879 			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4880 	}
4881 
4882 	bt_dev_info(hdev, "offload codecs enable %d changed %d",
4883 		    val, changed);
4884 
4885 	memcpy(rp.uuid, offload_codecs_uuid, 16);
4886 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4887 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4888 	err = mgmt_cmd_complete(sk, hdev->id,
4889 				MGMT_OP_SET_EXP_FEATURE, 0,
4890 				&rp, sizeof(rp));
4891 
4892 	if (changed)
4893 		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4894 
4895 	return err;
4896 }
4897 
4898 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4899 					  struct mgmt_cp_set_exp_feature *cp,
4900 					  u16 data_len)
4901 {
4902 	bool val, changed;
4903 	int err;
4904 	struct mgmt_rp_set_exp_feature rp;
4905 
4906 	/* Command requires a valid controller index */
4907 	if (!hdev)
4908 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4909 				       MGMT_OP_SET_EXP_FEATURE,
4910 				       MGMT_STATUS_INVALID_INDEX);
4911 
4912 	/* Parameters are limited to a single octet */
4913 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4914 		return mgmt_cmd_status(sk, hdev->id,
4915 				       MGMT_OP_SET_EXP_FEATURE,
4916 				       MGMT_STATUS_INVALID_PARAMS);
4917 
4918 	/* Only boolean on/off is supported */
4919 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4920 		return mgmt_cmd_status(sk, hdev->id,
4921 				       MGMT_OP_SET_EXP_FEATURE,
4922 				       MGMT_STATUS_INVALID_PARAMS);
4923 
4924 	val = !!cp->param[0];
4925 	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4926 
4927 	if (!hci_dev_le_state_simultaneous(hdev)) {
4928 		return mgmt_cmd_status(sk, hdev->id,
4929 				       MGMT_OP_SET_EXP_FEATURE,
4930 				       MGMT_STATUS_NOT_SUPPORTED);
4931 	}
4932 
4933 	if (changed) {
4934 		if (val)
4935 			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4936 		else
4937 			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4938 	}
4939 
4940 	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4941 		    val, changed);
4942 
4943 	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4944 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4945 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4946 	err = mgmt_cmd_complete(sk, hdev->id,
4947 				MGMT_OP_SET_EXP_FEATURE, 0,
4948 				&rp, sizeof(rp));
4949 
4950 	if (changed)
4951 		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4952 
4953 	return err;
4954 }
4955 
4956 #ifdef CONFIG_BT_LE
4957 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4958 			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4959 {
4960 	struct mgmt_rp_set_exp_feature rp;
4961 	bool val, changed = false;
4962 	int err;
4963 
4964 	/* Command requires the non-controller index */
4965 	if (hdev)
4966 		return mgmt_cmd_status(sk, hdev->id,
4967 				       MGMT_OP_SET_EXP_FEATURE,
4968 				       MGMT_STATUS_INVALID_INDEX);
4969 
4970 	/* Parameters are limited to a single octet */
4971 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4972 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4973 				       MGMT_OP_SET_EXP_FEATURE,
4974 				       MGMT_STATUS_INVALID_PARAMS);
4975 
4976 	/* Only boolean on/off is supported */
4977 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4978 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4979 				       MGMT_OP_SET_EXP_FEATURE,
4980 				       MGMT_STATUS_INVALID_PARAMS);
4981 
4982 	val = !!cp->param[0];
4983 	if (val)
4984 		err = iso_init();
4985 	else
4986 		err = iso_exit();
4987 
4988 	if (!err)
4989 		changed = true;
4990 
4991 	memcpy(rp.uuid, iso_socket_uuid, 16);
4992 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4993 
4994 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4995 
4996 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4997 				MGMT_OP_SET_EXP_FEATURE, 0,
4998 				&rp, sizeof(rp));
4999 
5000 	if (changed)
5001 		exp_feature_changed(hdev, iso_socket_uuid, val, sk);
5002 
5003 	return err;
5004 }
5005 #endif
5006 
5007 static const struct mgmt_exp_feature {
5008 	const u8 *uuid;
5009 	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
5010 			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
5011 } exp_features[] = {
5012 	EXP_FEAT(ZERO_KEY, set_zero_key_func),
5013 #ifdef CONFIG_BT_FEATURE_DEBUG
5014 	EXP_FEAT(debug_uuid, set_debug_func),
5015 #endif
5016 	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
5017 	EXP_FEAT(quality_report_uuid, set_quality_report_func),
5018 	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
5019 	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
5020 #ifdef CONFIG_BT_LE
5021 	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
5022 #endif
5023 
5024 	/* end with a null feature */
5025 	EXP_FEAT(NULL, NULL)
5026 };
5027 
5028 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
5029 			   void *data, u16 data_len)
5030 {
5031 	struct mgmt_cp_set_exp_feature *cp = data;
5032 	size_t i = 0;
5033 
5034 	bt_dev_dbg(hdev, "sock %p", sk);
5035 
5036 	for (i = 0; exp_features[i].uuid; i++) {
5037 		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
5038 			return exp_features[i].set_func(sk, hdev, cp, data_len);
5039 	}
5040 
5041 	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
5042 			       MGMT_OP_SET_EXP_FEATURE,
5043 			       MGMT_STATUS_NOT_SUPPORTED);
5044 }
5045 
5046 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5047 			    u16 data_len)
5048 {
5049 	struct mgmt_cp_get_device_flags *cp = data;
5050 	struct mgmt_rp_get_device_flags rp;
5051 	struct bdaddr_list_with_flags *br_params;
5052 	struct hci_conn_params *params;
5053 	u32 supported_flags;
5054 	u32 current_flags = 0;
5055 	u8 status = MGMT_STATUS_INVALID_PARAMS;
5056 
5057 	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
5058 		   &cp->addr.bdaddr, cp->addr.type);
5059 
5060 	hci_dev_lock(hdev);
5061 
5062 	supported_flags = hdev->conn_flags;
5063 
5064 	memset(&rp, 0, sizeof(rp));
5065 
5066 	if (cp->addr.type == BDADDR_BREDR) {
5067 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5068 							      &cp->addr.bdaddr,
5069 							      cp->addr.type);
5070 		if (!br_params)
5071 			goto done;
5072 
5073 		current_flags = br_params->flags;
5074 	} else {
5075 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5076 						le_addr_type(cp->addr.type));
5077 		if (!params)
5078 			goto done;
5079 
5080 		current_flags = params->flags;
5081 	}
5082 
5083 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5084 	rp.addr.type = cp->addr.type;
5085 	rp.supported_flags = cpu_to_le32(supported_flags);
5086 	rp.current_flags = cpu_to_le32(current_flags);
5087 
5088 	status = MGMT_STATUS_SUCCESS;
5089 
5090 done:
5091 	hci_dev_unlock(hdev);
5092 
5093 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5094 				&rp, sizeof(rp));
5095 }
5096 
5097 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5098 				 bdaddr_t *bdaddr, u8 bdaddr_type,
5099 				 u32 supported_flags, u32 current_flags)
5100 {
5101 	struct mgmt_ev_device_flags_changed ev;
5102 
5103 	bacpy(&ev.addr.bdaddr, bdaddr);
5104 	ev.addr.type = bdaddr_type;
5105 	ev.supported_flags = cpu_to_le32(supported_flags);
5106 	ev.current_flags = cpu_to_le32(current_flags);
5107 
5108 	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5109 }
5110 
5111 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5112 			    u16 len)
5113 {
5114 	struct mgmt_cp_set_device_flags *cp = data;
5115 	struct bdaddr_list_with_flags *br_params;
5116 	struct hci_conn_params *params;
5117 	u8 status = MGMT_STATUS_INVALID_PARAMS;
5118 	u32 supported_flags;
5119 	u32 current_flags = __le32_to_cpu(cp->current_flags);
5120 
5121 	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5122 		   &cp->addr.bdaddr, cp->addr.type, current_flags);
5123 
5124 	/* conn_flags is re-read and re-checked under hci_dev_lock() below */
5125 	supported_flags = hdev->conn_flags;
5126 
5127 	if ((supported_flags | current_flags) != supported_flags) {
5128 		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%x)",
5129 			    current_flags, supported_flags);
5130 		goto done;
5131 	}
5132 
5133 	hci_dev_lock(hdev);
5134 
5135 	if (cp->addr.type == BDADDR_BREDR) {
5136 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5137 							      &cp->addr.bdaddr,
5138 							      cp->addr.type);
5139 
5140 		if (br_params) {
5141 			br_params->flags = current_flags;
5142 			status = MGMT_STATUS_SUCCESS;
5143 		} else {
5144 			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5145 				    &cp->addr.bdaddr, cp->addr.type);
5146 		}
5147 
5148 		goto unlock;
5149 	}
5150 
5151 	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5152 					le_addr_type(cp->addr.type));
5153 	if (!params) {
5154 		bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5155 			    &cp->addr.bdaddr, le_addr_type(cp->addr.type));
5156 		goto unlock;
5157 	}
5158 
5159 	supported_flags = hdev->conn_flags;
5160 
5161 	if ((supported_flags | current_flags) != supported_flags) {
5162 		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%x)",
5163 			    current_flags, supported_flags);
5164 		goto unlock;
5165 	}
5166 
5167 	WRITE_ONCE(params->flags, current_flags);
5168 	status = MGMT_STATUS_SUCCESS;
5169 
5170 	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5171 	 * has been set.
5172 	 */
5173 	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5174 		hci_update_passive_scan(hdev);
5175 
5176 unlock:
5177 	hci_dev_unlock(hdev);
5178 
5179 done:
5180 	if (status == MGMT_STATUS_SUCCESS)
5181 		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5182 				     supported_flags, current_flags);
5183 
5184 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5185 				 &cp->addr, sizeof(cp->addr));
5186 }
5187 
5188 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5189 				   u16 handle)
5190 {
5191 	struct mgmt_ev_adv_monitor_added ev;
5192 
5193 	ev.monitor_handle = cpu_to_le16(handle);
5194 
5195 	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5196 }
5197 
5198 static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
5199 				     __le16 handle)
5200 {
5201 	struct mgmt_ev_adv_monitor_removed ev;
5202 
5203 	ev.monitor_handle = handle;
5204 
5205 	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
5206 }
5207 
5208 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5209 				 void *data, u16 len)
5210 {
5211 	struct adv_monitor *monitor = NULL;
5212 	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5213 	int handle, err;
5214 	size_t rp_size = 0;
5215 	__u32 supported = 0;
5216 	__u32 enabled = 0;
5217 	__u16 num_handles = 0;
5218 	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5219 
5220 	BT_DBG("request for %s", hdev->name);
5221 
5222 	hci_dev_lock(hdev);
5223 
5224 	if (msft_monitor_supported(hdev))
5225 		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5226 
5227 	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5228 		handles[num_handles++] = monitor->handle;
5229 
5230 	hci_dev_unlock(hdev);
5231 
5232 	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5233 	rp = kmalloc(rp_size, GFP_KERNEL);
5234 	if (!rp)
5235 		return -ENOMEM;
5236 
5237 	/* All supported features are currently enabled */
5238 	enabled = supported;
5239 
5240 	rp->supported_features = cpu_to_le32(supported);
5241 	rp->enabled_features = cpu_to_le32(enabled);
5242 	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5243 	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5244 	rp->num_handles = cpu_to_le16(num_handles);
5245 	if (num_handles)
5246 		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5247 
5248 	err = mgmt_cmd_complete(sk, hdev->id,
5249 				MGMT_OP_READ_ADV_MONITOR_FEATURES,
5250 				MGMT_STATUS_SUCCESS, rp, rp_size);
5251 
5252 	kfree(rp);
5253 
5254 	return err;
5255 }
5256 
5257 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5258 						   void *data, int status)
5259 {
5260 	struct mgmt_rp_add_adv_patterns_monitor rp;
5261 	struct mgmt_pending_cmd *cmd = data;
5262 	struct adv_monitor *monitor;
5263 
5264 	/* This is likely the result of hdev being closed and
5265 	 * mgmt_index_removed attempting to clean up any pending command.
5266 	 * hci_adv_monitors_clear() is about to be called, which will take
5267 	 * care of freeing the adv_monitor instances.
5268 	 */
5269 	if (status == -ECANCELED && !mgmt_pending_valid(hdev, cmd))
5270 		return;
5271 
5272 	monitor = cmd->user_data;
5273 
5274 	hci_dev_lock(hdev);
5275 
5276 	rp.monitor_handle = cpu_to_le16(monitor->handle);
5277 
5278 	if (!status) {
5279 		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5280 		hdev->adv_monitors_cnt++;
5281 		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5282 			monitor->state = ADV_MONITOR_STATE_REGISTERED;
5283 		hci_update_passive_scan(hdev);
5284 	}
5285 
5286 	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
5287 			  mgmt_status(status), &rp, sizeof(rp));
5288 	mgmt_pending_remove(cmd);
5289 
5290 	hci_dev_unlock(hdev);
5291 	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5292 		   rp.monitor_handle, status);
5293 }
5294 
5295 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5296 {
5297 	struct mgmt_pending_cmd *cmd = data;
5298 	struct adv_monitor *mon;
5299 
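	/* Fetch user_data while holding mgmt_pending_lock so the monitor
	 * is only dereferenced while the command is known to be listed.
	 */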
5300 	mutex_lock(&hdev->mgmt_pending_lock);
5301 
5302 	if (!__mgmt_pending_listed(hdev, cmd)) {
5303 		mutex_unlock(&hdev->mgmt_pending_lock);
5304 		return -ECANCELED;
5305 	}
5306 
5307 	mon = cmd->user_data;
5308 
5309 	mutex_unlock(&hdev->mgmt_pending_lock);
5310 
5311 	return hci_add_adv_monitor(hdev, mon);
5312 }
5313 
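/* Takes ownership of the monitor: on any failure path it is freed via
 * hci_free_adv_monitor() before the error status is returned.
 */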
5314 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5315 				      struct adv_monitor *m, u8 status,
5316 				      void *data, u16 len, u16 op)
5317 {
5318 	struct mgmt_pending_cmd *cmd;
5319 	int err;
5320 
5321 	hci_dev_lock(hdev);
5322 
5323 	if (status)
5324 		goto unlock;
5325 
5326 	if (pending_find(MGMT_OP_SET_LE, hdev) ||
5327 	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5328 	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5329 		status = MGMT_STATUS_BUSY;
5330 		goto unlock;
5331 	}
5332 
5333 	cmd = mgmt_pending_add(sk, op, hdev, data, len);
5334 	if (!cmd) {
5335 		status = MGMT_STATUS_NO_RESOURCES;
5336 		goto unlock;
5337 	}
5338 
5339 	cmd->user_data = m;
5340 	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5341 				 mgmt_add_adv_patterns_monitor_complete);
5342 	if (err) {
5343 		if (err == -ENOMEM)
5344 			status = MGMT_STATUS_NO_RESOURCES;
5345 		else
5346 			status = MGMT_STATUS_FAILED;
5347 
5348 		goto unlock;
5349 	}
5350 
5351 	hci_dev_unlock(hdev);
5352 
5353 	return 0;
5354 
5355 unlock:
5356 	hci_free_adv_monitor(hdev, m);
5357 	hci_dev_unlock(hdev);
5358 	return mgmt_cmd_status(sk, hdev->id, op, status);
5359 }
5360 
5361 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5362 				   struct mgmt_adv_rssi_thresholds *rssi)
5363 {
5364 	if (rssi) {
5365 		m->rssi.low_threshold = rssi->low_threshold;
5366 		m->rssi.low_threshold_timeout =
5367 		    __le16_to_cpu(rssi->low_threshold_timeout);
5368 		m->rssi.high_threshold = rssi->high_threshold;
5369 		m->rssi.high_threshold_timeout =
5370 		    __le16_to_cpu(rssi->high_threshold_timeout);
5371 		m->rssi.sampling_period = rssi->sampling_period;
5372 	} else {
5373 		/* Default values. These numbers are the least constricting
5374 		 * parameters for the MSFT API to work, so it behaves as if
5375 		 * there are no RSSI parameters to consider. They may need to
5376 		 * change if other APIs are to be supported.
5377 		 */
5378 		m->rssi.low_threshold = -127;
5379 		m->rssi.low_threshold_timeout = 60;
5380 		m->rssi.high_threshold = -127;
5381 		m->rssi.high_threshold_timeout = 0;
5382 		m->rssi.sampling_period = 0;
5383 	}
5384 }
5385 
5386 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5387 				    struct mgmt_adv_pattern *patterns)
5388 {
5389 	u8 offset = 0, length = 0;
5390 	struct adv_pattern *p = NULL;
5391 	int i;
5392 
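	/* Validate that each pattern fits within the maximum extended
	 * advertising data length before copying it.
	 */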
5393 	for (i = 0; i < pattern_count; i++) {
5394 		offset = patterns[i].offset;
5395 		length = patterns[i].length;
5396 		if (offset >= HCI_MAX_EXT_AD_LENGTH ||
5397 		    length > HCI_MAX_EXT_AD_LENGTH ||
5398 		    (offset + length) > HCI_MAX_EXT_AD_LENGTH)
5399 			return MGMT_STATUS_INVALID_PARAMS;
5400 
5401 		p = kmalloc(sizeof(*p), GFP_KERNEL);
5402 		if (!p)
5403 			return MGMT_STATUS_NO_RESOURCES;
5404 
5405 		p->ad_type = patterns[i].ad_type;
5406 		p->offset = patterns[i].offset;
5407 		p->length = patterns[i].length;
5408 		memcpy(p->value, patterns[i].value, p->length);
5409 
5410 		INIT_LIST_HEAD(&p->list);
5411 		list_add(&p->list, &m->patterns);
5412 	}
5413 
5414 	return MGMT_STATUS_SUCCESS;
5415 }
5416 
5417 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5418 				    void *data, u16 len)
5419 {
5420 	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5421 	struct adv_monitor *m = NULL;
5422 	u8 status = MGMT_STATUS_SUCCESS;
5423 	size_t expected_size = sizeof(*cp);
5424 
5425 	BT_DBG("request for %s", hdev->name);
5426 
5427 	if (len <= sizeof(*cp)) {
5428 		status = MGMT_STATUS_INVALID_PARAMS;
5429 		goto done;
5430 	}
5431 
5432 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5433 	if (len != expected_size) {
5434 		status = MGMT_STATUS_INVALID_PARAMS;
5435 		goto done;
5436 	}
5437 
5438 	m = kzalloc(sizeof(*m), GFP_KERNEL);
5439 	if (!m) {
5440 		status = MGMT_STATUS_NO_RESOURCES;
5441 		goto done;
5442 	}
5443 
5444 	INIT_LIST_HEAD(&m->patterns);
5445 
5446 	parse_adv_monitor_rssi(m, NULL);
5447 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5448 
5449 done:
5450 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5451 					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5452 }
5453 
5454 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5455 					 void *data, u16 len)
5456 {
5457 	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5458 	struct adv_monitor *m = NULL;
5459 	u8 status = MGMT_STATUS_SUCCESS;
5460 	size_t expected_size = sizeof(*cp);
5461 
5462 	BT_DBG("request for %s", hdev->name);
5463 
5464 	if (len <= sizeof(*cp)) {
5465 		status = MGMT_STATUS_INVALID_PARAMS;
5466 		goto done;
5467 	}
5468 
5469 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5470 	if (len != expected_size) {
5471 		status = MGMT_STATUS_INVALID_PARAMS;
5472 		goto done;
5473 	}
5474 
5475 	m = kzalloc(sizeof(*m), GFP_KERNEL);
5476 	if (!m) {
5477 		status = MGMT_STATUS_NO_RESOURCES;
5478 		goto done;
5479 	}
5480 
5481 	INIT_LIST_HEAD(&m->patterns);
5482 
5483 	parse_adv_monitor_rssi(m, &cp->rssi);
5484 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5485 
5486 done:
5487 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5488 					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5489 }
5490 
5491 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5492 					     void *data, int status)
5493 {
5494 	struct mgmt_rp_remove_adv_monitor rp;
5495 	struct mgmt_pending_cmd *cmd = data;
5496 	struct mgmt_cp_remove_adv_monitor *cp;
5497 
5498 	if (status == -ECANCELED)
5499 		return;
5500 
5501 	hci_dev_lock(hdev);
5502 
5503 	cp = cmd->param;
5504 
5505 	rp.monitor_handle = cp->monitor_handle;
5506 
5507 	if (!status) {
5508 		mgmt_adv_monitor_removed(cmd->sk, hdev, cp->monitor_handle);
5509 		hci_update_passive_scan(hdev);
5510 	}
5511 
5512 	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
5513 			  mgmt_status(status), &rp, sizeof(rp));
5514 	mgmt_pending_free(cmd);
5515 
5516 	hci_dev_unlock(hdev);
5517 	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5518 		   rp.monitor_handle, status);
5519 }
5520 
5521 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5522 {
5523 	struct mgmt_pending_cmd *cmd = data;
5524 	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5525 	u16 handle = __le16_to_cpu(cp->monitor_handle);
5526 
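	/* Handle 0 is a wildcard that removes all registered monitors */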
5527 	if (!handle)
5528 		return hci_remove_all_adv_monitor(hdev);
5529 
5530 	return hci_remove_single_adv_monitor(hdev, handle);
5531 }
5532 
5533 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5534 			      void *data, u16 len)
5535 {
5536 	struct mgmt_pending_cmd *cmd;
5537 	int err, status;
5538 
5539 	hci_dev_lock(hdev);
5540 
5541 	if (pending_find(MGMT_OP_SET_LE, hdev) ||
5542 	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5543 	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5544 		status = MGMT_STATUS_BUSY;
5545 		goto unlock;
5546 	}
5547 
5548 	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5549 	if (!cmd) {
5550 		status = MGMT_STATUS_NO_RESOURCES;
5551 		goto unlock;
5552 	}
5553 
5554 	err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
5555 				  mgmt_remove_adv_monitor_complete);
5556 
5557 	if (err) {
5558 		mgmt_pending_free(cmd);
5559 
5560 		if (err == -ENOMEM)
5561 			status = MGMT_STATUS_NO_RESOURCES;
5562 		else
5563 			status = MGMT_STATUS_FAILED;
5564 
5565 		goto unlock;
5566 	}
5567 
5568 	hci_dev_unlock(hdev);
5569 
5570 	return 0;
5571 
5572 unlock:
5573 	hci_dev_unlock(hdev);
5574 	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5575 			       status);
5576 }
5577 
5578 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data,
5579 					 int err)
5580 {
5581 	struct mgmt_rp_read_local_oob_data mgmt_rp;
5582 	size_t rp_size = sizeof(mgmt_rp);
5583 	struct mgmt_pending_cmd *cmd = data;
5584 	struct sk_buff *skb = cmd->skb;
5585 	u8 status = mgmt_status(err);
5586 
5587 	if (!status) {
5588 		if (!skb)
5589 			status = MGMT_STATUS_FAILED;
5590 		else if (IS_ERR(skb))
5591 			status = mgmt_status(PTR_ERR(skb));
5592 		else
5593 			status = mgmt_status(skb->data[0]);
5594 	}
5595 
5596 	bt_dev_dbg(hdev, "status %d", status);
5597 
5598 	if (status) {
5599 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5600 				status);
5601 		goto remove;
5602 	}
5603 
5604 	memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5605 
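	/* Without Secure Connections only the P-192 hash and randomizer
	 * are available, so the P-256 fields are trimmed from the reply.
	 */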
5606 	if (!bredr_sc_enabled(hdev)) {
5607 		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5608 
5609 		if (skb->len < sizeof(*rp)) {
5610 			mgmt_cmd_status(cmd->sk, hdev->id,
5611 					MGMT_OP_READ_LOCAL_OOB_DATA,
5612 					MGMT_STATUS_FAILED);
5613 			goto remove;
5614 		}
5615 
5616 		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5617 		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5618 
5619 		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5620 	} else {
5621 		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5622 
5623 		if (skb->len < sizeof(*rp)) {
5624 			mgmt_cmd_status(cmd->sk, hdev->id,
5625 					MGMT_OP_READ_LOCAL_OOB_DATA,
5626 					MGMT_STATUS_FAILED);
5627 			goto remove;
5628 		}
5629 
5630 		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5631 		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5632 
5633 		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5634 		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5635 	}
5636 
5637 	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5638 			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5639 
5640 remove:
5641 	if (skb && !IS_ERR(skb))
5642 		kfree_skb(skb);
5643 
5644 	mgmt_pending_free(cmd);
5645 }
5646 
5647 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5648 {
5649 	struct mgmt_pending_cmd *cmd = data;
5650 
5651 	if (bredr_sc_enabled(hdev))
5652 		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5653 	else
5654 		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5655 
5656 	if (IS_ERR(cmd->skb))
5657 		return PTR_ERR(cmd->skb);
5658 	else
5659 		return 0;
5660 }
5661 
5662 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5663 			       void *data, u16 data_len)
5664 {
5665 	struct mgmt_pending_cmd *cmd;
5666 	int err;
5667 
5668 	bt_dev_dbg(hdev, "sock %p", sk);
5669 
5670 	hci_dev_lock(hdev);
5671 
5672 	if (!hdev_is_powered(hdev)) {
5673 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5674 				      MGMT_STATUS_NOT_POWERED);
5675 		goto unlock;
5676 	}
5677 
5678 	if (!lmp_ssp_capable(hdev)) {
5679 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5680 				      MGMT_STATUS_NOT_SUPPORTED);
5681 		goto unlock;
5682 	}
5683 
5684 	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5685 	if (!cmd)
5686 		err = -ENOMEM;
5687 	else
5688 		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5689 					 read_local_oob_data_complete);
5690 
5691 	if (err < 0) {
5692 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5693 				      MGMT_STATUS_FAILED);
5694 
5695 		if (cmd)
5696 			mgmt_pending_free(cmd);
5697 	}
5698 
5699 unlock:
5700 	hci_dev_unlock(hdev);
5701 	return err;
5702 }
5703 
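/* Two command layouts are accepted, distinguished by length: the legacy
 * layout carries only the P-192 hash/randomizer (BR/EDR addresses only),
 * while the extended layout carries both P-192 and P-256 values.
 * All-zero values disable OOB data for the corresponding curve.
 */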
5704 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5705 			       void *data, u16 len)
5706 {
5707 	struct mgmt_addr_info *addr = data;
5708 	int err;
5709 
5710 	bt_dev_dbg(hdev, "sock %p", sk);
5711 
5712 	if (!bdaddr_type_is_valid(addr->type))
5713 		return mgmt_cmd_complete(sk, hdev->id,
5714 					 MGMT_OP_ADD_REMOTE_OOB_DATA,
5715 					 MGMT_STATUS_INVALID_PARAMS,
5716 					 addr, sizeof(*addr));
5717 
5718 	hci_dev_lock(hdev);
5719 
5720 	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5721 		struct mgmt_cp_add_remote_oob_data *cp = data;
5722 		u8 status;
5723 
5724 		if (cp->addr.type != BDADDR_BREDR) {
5725 			err = mgmt_cmd_complete(sk, hdev->id,
5726 						MGMT_OP_ADD_REMOTE_OOB_DATA,
5727 						MGMT_STATUS_INVALID_PARAMS,
5728 						&cp->addr, sizeof(cp->addr));
5729 			goto unlock;
5730 		}
5731 
5732 		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5733 					      cp->addr.type, cp->hash,
5734 					      cp->rand, NULL, NULL);
5735 		if (err < 0)
5736 			status = MGMT_STATUS_FAILED;
5737 		else
5738 			status = MGMT_STATUS_SUCCESS;
5739 
5740 		err = mgmt_cmd_complete(sk, hdev->id,
5741 					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5742 					&cp->addr, sizeof(cp->addr));
5743 	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5744 		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5745 		u8 *rand192, *hash192, *rand256, *hash256;
5746 		u8 status;
5747 
5748 		if (bdaddr_type_is_le(cp->addr.type)) {
5749 			/* Enforce zero-valued 192-bit parameters as
5750 			 * long as legacy SMP OOB isn't implemented.
5751 			 */
5752 			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5753 			    memcmp(cp->hash192, ZERO_KEY, 16)) {
5754 				err = mgmt_cmd_complete(sk, hdev->id,
5755 							MGMT_OP_ADD_REMOTE_OOB_DATA,
5756 							MGMT_STATUS_INVALID_PARAMS,
5757 							addr, sizeof(*addr));
5758 				goto unlock;
5759 			}
5760 
5761 			rand192 = NULL;
5762 			hash192 = NULL;
5763 		} else {
5764 			/* If either of the P-192 values is zero, just
5765 			 * disable OOB data for P-192.
5766 			 */
5767 			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5768 			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
5769 				rand192 = NULL;
5770 				hash192 = NULL;
5771 			} else {
5772 				rand192 = cp->rand192;
5773 				hash192 = cp->hash192;
5774 			}
5775 		}
5776 
5777 		/* If either of the P-256 values is zero, just disable OOB
5778 		 * data for P-256.
5779 		 */
5780 		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5781 		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
5782 			rand256 = NULL;
5783 			hash256 = NULL;
5784 		} else {
5785 			rand256 = cp->rand256;
5786 			hash256 = cp->hash256;
5787 		}
5788 
5789 		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5790 					      cp->addr.type, hash192, rand192,
5791 					      hash256, rand256);
5792 		if (err < 0)
5793 			status = MGMT_STATUS_FAILED;
5794 		else
5795 			status = MGMT_STATUS_SUCCESS;
5796 
5797 		err = mgmt_cmd_complete(sk, hdev->id,
5798 					MGMT_OP_ADD_REMOTE_OOB_DATA,
5799 					status, &cp->addr, sizeof(cp->addr));
5800 	} else {
5801 		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5802 			   len);
5803 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5804 				      MGMT_STATUS_INVALID_PARAMS);
5805 	}
5806 
5807 unlock:
5808 	hci_dev_unlock(hdev);
5809 	return err;
5810 }
5811 
5812 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5813 				  void *data, u16 len)
5814 {
5815 	struct mgmt_cp_remove_remote_oob_data *cp = data;
5816 	u8 status;
5817 	int err;
5818 
5819 	bt_dev_dbg(hdev, "sock %p", sk);
5820 
5821 	if (cp->addr.type != BDADDR_BREDR)
5822 		return mgmt_cmd_complete(sk, hdev->id,
5823 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5824 					 MGMT_STATUS_INVALID_PARAMS,
5825 					 &cp->addr, sizeof(cp->addr));
5826 
5827 	hci_dev_lock(hdev);
5828 
5829 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5830 		hci_remote_oob_data_clear(hdev);
5831 		status = MGMT_STATUS_SUCCESS;
5832 		goto done;
5833 	}
5834 
5835 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5836 	if (err < 0)
5837 		status = MGMT_STATUS_INVALID_PARAMS;
5838 	else
5839 		status = MGMT_STATUS_SUCCESS;
5840 
5841 done:
5842 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5843 				status, &cp->addr, sizeof(cp->addr));
5844 
5845 	hci_dev_unlock(hdev);
5846 	return err;
5847 }
5848 
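/* Validate a discovery type against the controller's capabilities:
 * LE discovery needs LE support, BR/EDR discovery needs BR/EDR support
 * and interleaved discovery needs both (note the fallthrough). On
 * failure *mgmt_status carries the status code to return to user space.
 */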
5849 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5850 				    uint8_t *mgmt_status)
5851 {
5852 	switch (type) {
5853 	case DISCOV_TYPE_LE:
5854 		*mgmt_status = mgmt_le_support(hdev);
5855 		if (*mgmt_status)
5856 			return false;
5857 		break;
5858 	case DISCOV_TYPE_INTERLEAVED:
5859 		*mgmt_status = mgmt_le_support(hdev);
5860 		if (*mgmt_status)
5861 			return false;
5862 		fallthrough;
5863 	case DISCOV_TYPE_BREDR:
5864 		*mgmt_status = mgmt_bredr_support(hdev);
5865 		if (*mgmt_status)
5866 			return false;
5867 		break;
5868 	default:
5869 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5870 		return false;
5871 	}
5872 
5873 	return true;
5874 }
5875 
5876 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5877 {
5878 	struct mgmt_pending_cmd *cmd = data;
5879 
5880 	bt_dev_dbg(hdev, "err %d", err);
5881 
5882 	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
5883 		return;
5884 
5885 	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
5886 			  cmd->param, 1);
5887 	mgmt_pending_free(cmd);
5888 
5889 	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
5890 				DISCOVERY_FINDING);
5891 }
5892 
5893 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5894 {
5895 	if (!mgmt_pending_listed(hdev, data))
5896 		return -ECANCELED;
5897 
5898 	return hci_start_discovery_sync(hdev);
5899 }
5900 
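/* Common handler for Start Discovery and Start Limited Discovery: the
 * request is rejected while powered off, while a discovery or periodic
 * inquiry is already running, or while discovery is paused. Otherwise
 * the discovery filter is reset, the limited flag is set according to
 * the opcode and hci_start_discovery_sync is queued.
 */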
5901 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5902 				    u16 op, void *data, u16 len)
5903 {
5904 	struct mgmt_cp_start_discovery *cp = data;
5905 	struct mgmt_pending_cmd *cmd;
5906 	u8 status;
5907 	int err;
5908 
5909 	bt_dev_dbg(hdev, "sock %p", sk);
5910 
5911 	hci_dev_lock(hdev);
5912 
5913 	if (!hdev_is_powered(hdev)) {
5914 		err = mgmt_cmd_complete(sk, hdev->id, op,
5915 					MGMT_STATUS_NOT_POWERED,
5916 					&cp->type, sizeof(cp->type));
5917 		goto failed;
5918 	}
5919 
5920 	if (hdev->discovery.state != DISCOVERY_STOPPED ||
5921 	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5922 		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5923 					&cp->type, sizeof(cp->type));
5924 		goto failed;
5925 	}
5926 
5927 	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5928 		err = mgmt_cmd_complete(sk, hdev->id, op, status,
5929 					&cp->type, sizeof(cp->type));
5930 		goto failed;
5931 	}
5932 
5933 	/* Can't start discovery when it is paused */
5934 	if (hdev->discovery_paused) {
5935 		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5936 					&cp->type, sizeof(cp->type));
5937 		goto failed;
5938 	}
5939 
5940 	/* Clear the discovery filter first to free any previously
5941 	 * allocated memory for the UUID list.
5942 	 */
5943 	hci_discovery_filter_clear(hdev);
5944 
5945 	hdev->discovery.type = cp->type;
5946 	hdev->discovery.report_invalid_rssi = false;
5947 	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5948 		hdev->discovery.limited = true;
5949 	else
5950 		hdev->discovery.limited = false;
5951 
5952 	cmd = mgmt_pending_add(sk, op, hdev, data, len);
5953 	if (!cmd) {
5954 		err = -ENOMEM;
5955 		goto failed;
5956 	}
5957 
5958 	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5959 				 start_discovery_complete);
5960 	if (err < 0) {
5961 		mgmt_pending_remove(cmd);
5962 		goto failed;
5963 	}
5964 
5965 	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5966 
5967 failed:
5968 	hci_dev_unlock(hdev);
5969 	return err;
5970 }
5971 
5972 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5973 			   void *data, u16 len)
5974 {
5975 	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5976 					data, len);
5977 }
5978 
5979 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5980 				   void *data, u16 len)
5981 {
5982 	return start_discovery_internal(sk, hdev,
5983 					MGMT_OP_START_LIMITED_DISCOVERY,
5984 					data, len);
5985 }
5986 
5987 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5988 				   void *data, u16 len)
5989 {
5990 	struct mgmt_cp_start_service_discovery *cp = data;
5991 	struct mgmt_pending_cmd *cmd;
5992 	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5993 	u16 uuid_count, expected_len;
5994 	u8 status;
5995 	int err;
5996 
5997 	bt_dev_dbg(hdev, "sock %p", sk);
5998 
5999 	hci_dev_lock(hdev);
6000 
6001 	if (!hdev_is_powered(hdev)) {
6002 		err = mgmt_cmd_complete(sk, hdev->id,
6003 					MGMT_OP_START_SERVICE_DISCOVERY,
6004 					MGMT_STATUS_NOT_POWERED,
6005 					&cp->type, sizeof(cp->type));
6006 		goto failed;
6007 	}
6008 
6009 	if (hdev->discovery.state != DISCOVERY_STOPPED ||
6010 	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
6011 		err = mgmt_cmd_complete(sk, hdev->id,
6012 					MGMT_OP_START_SERVICE_DISCOVERY,
6013 					MGMT_STATUS_BUSY, &cp->type,
6014 					sizeof(cp->type));
6015 		goto failed;
6016 	}
6017 
6018 	if (hdev->discovery_paused) {
6019 		err = mgmt_cmd_complete(sk, hdev->id,
6020 					MGMT_OP_START_SERVICE_DISCOVERY,
6021 					MGMT_STATUS_BUSY, &cp->type,
6022 					sizeof(cp->type));
6023 		goto failed;
6024 	}
6025 
6026 	uuid_count = __le16_to_cpu(cp->uuid_count);
6027 	if (uuid_count > max_uuid_count) {
6028 		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
6029 			   uuid_count);
6030 		err = mgmt_cmd_complete(sk, hdev->id,
6031 					MGMT_OP_START_SERVICE_DISCOVERY,
6032 					MGMT_STATUS_INVALID_PARAMS, &cp->type,
6033 					sizeof(cp->type));
6034 		goto failed;
6035 	}
6036 
6037 	expected_len = sizeof(*cp) + uuid_count * 16;
6038 	if (expected_len != len) {
6039 		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
6040 			   expected_len, len);
6041 		err = mgmt_cmd_complete(sk, hdev->id,
6042 					MGMT_OP_START_SERVICE_DISCOVERY,
6043 					MGMT_STATUS_INVALID_PARAMS, &cp->type,
6044 					sizeof(cp->type));
6045 		goto failed;
6046 	}
6047 
6048 	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
6049 		err = mgmt_cmd_complete(sk, hdev->id,
6050 					MGMT_OP_START_SERVICE_DISCOVERY,
6051 					status, &cp->type, sizeof(cp->type));
6052 		goto failed;
6053 	}
6054 
6055 	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
6056 			       hdev, data, len);
6057 	if (!cmd) {
6058 		err = -ENOMEM;
6059 		goto failed;
6060 	}
6061 
6062 	/* Clear the discovery filter first to free any previously
6063 	 * allocated memory for the UUID list.
6064 	 */
6065 	hci_discovery_filter_clear(hdev);
6066 
6067 	hdev->discovery.result_filtering = true;
6068 	hdev->discovery.type = cp->type;
6069 	hdev->discovery.rssi = cp->rssi;
6070 	hdev->discovery.uuid_count = uuid_count;
6071 
6072 	if (uuid_count > 0) {
6073 		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
6074 						GFP_KERNEL);
6075 		if (!hdev->discovery.uuids) {
6076 			err = mgmt_cmd_complete(sk, hdev->id,
6077 						MGMT_OP_START_SERVICE_DISCOVERY,
6078 						MGMT_STATUS_FAILED,
6079 						&cp->type, sizeof(cp->type));
6080 			mgmt_pending_remove(cmd);
6081 			goto failed;
6082 		}
6083 	}
6084 
6085 	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6086 				 start_discovery_complete);
6087 	if (err < 0) {
6088 		mgmt_pending_remove(cmd);
6089 		goto failed;
6090 	}
6091 
6092 	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6093 
6094 failed:
6095 	hci_dev_unlock(hdev);
6096 	return err;
6097 }
6098 
6099 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6100 {
6101 	struct mgmt_pending_cmd *cmd = data;
6102 
6103 	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
6104 		return;
6105 
6106 	bt_dev_dbg(hdev, "err %d", err);
6107 
6108 	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
6109 			  cmd->param, 1);
6110 	mgmt_pending_free(cmd);
6111 
6112 	if (!err)
6113 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6114 }
6115 
6116 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6117 {
6118 	if (!mgmt_pending_listed(hdev, data))
6119 		return -ECANCELED;
6120 
6121 	return hci_stop_discovery_sync(hdev);
6122 }
6123 
6124 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6125 			  u16 len)
6126 {
6127 	struct mgmt_cp_stop_discovery *mgmt_cp = data;
6128 	struct mgmt_pending_cmd *cmd;
6129 	int err;
6130 
6131 	bt_dev_dbg(hdev, "sock %p", sk);
6132 
6133 	hci_dev_lock(hdev);
6134 
6135 	if (!hci_discovery_active(hdev)) {
6136 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6137 					MGMT_STATUS_REJECTED, &mgmt_cp->type,
6138 					sizeof(mgmt_cp->type));
6139 		goto unlock;
6140 	}
6141 
6142 	if (hdev->discovery.type != mgmt_cp->type) {
6143 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6144 					MGMT_STATUS_INVALID_PARAMS,
6145 					&mgmt_cp->type, sizeof(mgmt_cp->type));
6146 		goto unlock;
6147 	}
6148 
6149 	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6150 	if (!cmd) {
6151 		err = -ENOMEM;
6152 		goto unlock;
6153 	}
6154 
6155 	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6156 				 stop_discovery_complete);
6157 	if (err < 0) {
6158 		mgmt_pending_remove(cmd);
6159 		goto unlock;
6160 	}
6161 
6162 	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6163 
6164 unlock:
6165 	hci_dev_unlock(hdev);
6166 	return err;
6167 }
6168 
6169 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6170 			u16 len)
6171 {
6172 	struct mgmt_cp_confirm_name *cp = data;
6173 	struct inquiry_entry *e;
6174 	int err;
6175 
6176 	bt_dev_dbg(hdev, "sock %p", sk);
6177 
6178 	hci_dev_lock(hdev);
6179 
6180 	if (!hci_discovery_active(hdev)) {
6181 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6182 					MGMT_STATUS_FAILED, &cp->addr,
6183 					sizeof(cp->addr));
6184 		goto failed;
6185 	}
6186 
6187 	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6188 	if (!e) {
6189 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6190 					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6191 					sizeof(cp->addr));
6192 		goto failed;
6193 	}
6194 
6195 	if (cp->name_known) {
6196 		e->name_state = NAME_KNOWN;
6197 		list_del(&e->list);
6198 	} else {
6199 		e->name_state = NAME_NEEDED;
6200 		hci_inquiry_cache_update_resolve(hdev, e);
6201 	}
6202 
6203 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6204 				&cp->addr, sizeof(cp->addr));
6205 
6206 failed:
6207 	hci_dev_unlock(hdev);
6208 	return err;
6209 }
6210 
6211 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6212 			u16 len)
6213 {
6214 	struct mgmt_cp_block_device *cp = data;
6215 	u8 status;
6216 	int err;
6217 
6218 	bt_dev_dbg(hdev, "sock %p", sk);
6219 
6220 	if (!bdaddr_type_is_valid(cp->addr.type))
6221 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6222 					 MGMT_STATUS_INVALID_PARAMS,
6223 					 &cp->addr, sizeof(cp->addr));
6224 
6225 	hci_dev_lock(hdev);
6226 
6227 	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6228 				  cp->addr.type);
6229 	if (err < 0) {
6230 		status = MGMT_STATUS_FAILED;
6231 		goto done;
6232 	}
6233 
6234 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6235 		   sk);
6236 	status = MGMT_STATUS_SUCCESS;
6237 
6238 done:
6239 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6240 				&cp->addr, sizeof(cp->addr));
6241 
6242 	hci_dev_unlock(hdev);
6243 
6244 	return err;
6245 }
6246 
6247 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6248 			  u16 len)
6249 {
6250 	struct mgmt_cp_unblock_device *cp = data;
6251 	u8 status;
6252 	int err;
6253 
6254 	bt_dev_dbg(hdev, "sock %p", sk);
6255 
6256 	if (!bdaddr_type_is_valid(cp->addr.type))
6257 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6258 					 MGMT_STATUS_INVALID_PARAMS,
6259 					 &cp->addr, sizeof(cp->addr));
6260 
6261 	hci_dev_lock(hdev);
6262 
6263 	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6264 				  cp->addr.type);
6265 	if (err < 0) {
6266 		status = MGMT_STATUS_INVALID_PARAMS;
6267 		goto done;
6268 	}
6269 
6270 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6271 		   sk);
6272 	status = MGMT_STATUS_SUCCESS;
6273 
6274 done:
6275 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6276 				&cp->addr, sizeof(cp->addr));
6277 
6278 	hci_dev_unlock(hdev);
6279 
6280 	return err;
6281 }
6282 
6283 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6284 {
6285 	return hci_update_eir_sync(hdev);
6286 }
6287 
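/* Set Device ID: source 0x0000 disables the Device ID, 0x0001 denotes a
 * Bluetooth SIG assigned vendor ID and 0x0002 a USB Implementer's Forum
 * assigned one; anything higher is rejected. The EIR data is then
 * updated so the new Device ID record gets advertised.
 */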
6288 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6289 			 u16 len)
6290 {
6291 	struct mgmt_cp_set_device_id *cp = data;
6292 	int err;
6293 	__u16 source;
6294 
6295 	bt_dev_dbg(hdev, "sock %p", sk);
6296 
6297 	source = __le16_to_cpu(cp->source);
6298 
6299 	if (source > 0x0002)
6300 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6301 				       MGMT_STATUS_INVALID_PARAMS);
6302 
6303 	hci_dev_lock(hdev);
6304 
6305 	hdev->devid_source = source;
6306 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6307 	hdev->devid_product = __le16_to_cpu(cp->product);
6308 	hdev->devid_version = __le16_to_cpu(cp->version);
6309 
6310 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6311 				NULL, 0);
6312 
6313 	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6314 
6315 	hci_dev_unlock(hdev);
6316 
6317 	return err;
6318 }
6319 
6320 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6321 {
6322 	if (err)
6323 		bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6324 	else
6325 		bt_dev_dbg(hdev, "status %d", err);
6326 }
6327 
6328 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6329 {
6330 	struct mgmt_pending_cmd *cmd = data;
6331 	struct cmd_lookup match = { NULL, hdev };
6332 	u8 instance;
6333 	struct adv_info *adv_instance;
6334 	u8 status = mgmt_status(err);
6335 
6336 	if (err == -ECANCELED || !mgmt_pending_valid(hdev, data))
6337 		return;
6338 
6339 	if (status) {
6340 		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, status);
6341 		mgmt_pending_free(cmd);
6342 		return;
6343 	}
6344 
6345 	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6346 		hci_dev_set_flag(hdev, HCI_ADVERTISING);
6347 	else
6348 		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6349 
6350 	settings_rsp(cmd, &match);
6351 
6352 	new_settings(hdev, match.sk);
6353 
6354 	if (match.sk)
6355 		sock_put(match.sk);
6356 
6357 	/* If "Set Advertising" was just disabled and instance advertising was
6358 	 * set up earlier, then re-enable multi-instance advertising.
6359 	 */
6360 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6361 	    list_empty(&hdev->adv_instances))
6362 		return;
6363 
6364 	instance = hdev->cur_adv_instance;
6365 	if (!instance) {
6366 		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6367 							struct adv_info, list);
6368 		if (!adv_instance)
6369 			return;
6370 
6371 		instance = adv_instance->instance;
6372 	}
6373 
6374 	err = hci_schedule_adv_instance_sync(hdev, instance, true);
6375 
6376 	enable_advertising_instance(hdev, err);
6377 }
6378 
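/* Synchronous worker for Set Advertising. The mode parameter is copied
 * under mgmt_pending_lock since the pending command may be cancelled
 * concurrently. A value of 0x02 selects connectable advertising; any
 * non-zero value switches to instance 0x00 and (re)enables advertising,
 * while 0x00 disables it.
 */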
6379 static int set_adv_sync(struct hci_dev *hdev, void *data)
6380 {
6381 	struct mgmt_pending_cmd *cmd = data;
6382 	struct mgmt_mode cp;
6383 	u8 val;
6384 
6385 	mutex_lock(&hdev->mgmt_pending_lock);
6386 
6387 	if (!__mgmt_pending_listed(hdev, cmd)) {
6388 		mutex_unlock(&hdev->mgmt_pending_lock);
6389 		return -ECANCELED;
6390 	}
6391 
6392 	memcpy(&cp, cmd->param, sizeof(cp));
6393 
6394 	mutex_unlock(&hdev->mgmt_pending_lock);
6395 
6396 	val = !!cp.val;
6397 
6398 	if (cp.val == 0x02)
6399 		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6400 	else
6401 		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6402 
6403 	cancel_adv_timeout(hdev);
6404 
6405 	if (val) {
6406 		/* Switch to instance "0" for the Set Advertising setting.
6407 		 * We cannot use update_[adv|scan_rsp]_data() here as the
6408 		 * HCI_ADVERTISING flag is not yet set.
6409 		 */
6410 		hdev->cur_adv_instance = 0x00;
6411 
6412 		if (ext_adv_capable(hdev)) {
6413 			hci_start_ext_adv_sync(hdev, 0x00);
6414 		} else {
6415 			hci_update_adv_data_sync(hdev, 0x00);
6416 			hci_update_scan_rsp_data_sync(hdev, 0x00);
6417 			hci_enable_advertising_sync(hdev);
6418 		}
6419 	} else {
6420 		hci_disable_advertising_sync(hdev);
6421 	}
6422 
6423 	return 0;
6424 }
6425 
6426 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6427 			   u16 len)
6428 {
6429 	struct mgmt_mode *cp = data;
6430 	struct mgmt_pending_cmd *cmd;
6431 	u8 val, status;
6432 	int err;
6433 
6434 	bt_dev_dbg(hdev, "sock %p", sk);
6435 
6436 	status = mgmt_le_support(hdev);
6437 	if (status)
6438 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6439 				       status);
6440 
6441 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6442 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6443 				       MGMT_STATUS_INVALID_PARAMS);
6444 
6445 	if (hdev->advertising_paused)
6446 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6447 				       MGMT_STATUS_BUSY);
6448 
6449 	hci_dev_lock(hdev);
6450 
6451 	val = !!cp->val;
6452 
6453 	/* The following conditions are ones which mean that we should
6454 	 * not do any HCI communication but directly send a mgmt
6455 	 * response to user space (after toggling the flag if
6456 	 * necessary).
6457 	 */
6458 	if (!hdev_is_powered(hdev) ||
6459 	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6460 	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6461 	    hci_dev_test_flag(hdev, HCI_MESH) ||
6462 	    hci_conn_num(hdev, LE_LINK) > 0 ||
6463 	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6464 	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6465 		bool changed;
6466 
6467 		if (cp->val) {
6468 			hdev->cur_adv_instance = 0x00;
6469 			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6470 			if (cp->val == 0x02)
6471 				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6472 			else
6473 				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6474 		} else {
6475 			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6476 			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6477 		}
6478 
6479 		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6480 		if (err < 0)
6481 			goto unlock;
6482 
6483 		if (changed)
6484 			err = new_settings(hdev, sk);
6485 
6486 		goto unlock;
6487 	}
6488 
6489 	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6490 	    pending_find(MGMT_OP_SET_LE, hdev)) {
6491 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6492 				      MGMT_STATUS_BUSY);
6493 		goto unlock;
6494 	}
6495 
6496 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6497 	if (!cmd)
6498 		err = -ENOMEM;
6499 	else
6500 		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6501 					 set_advertising_complete);
6502 
6503 	if (err < 0 && cmd)
6504 		mgmt_pending_remove(cmd);
6505 
6506 unlock:
6507 	hci_dev_unlock(hdev);
6508 	return err;
6509 }
6510 
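/* Set Static Address: only allowed on LE capable controllers and only
 * while powered off. A non-zero address must be a valid static random
 * address, i.e. with its two most significant bits set to 0b11, while
 * BDADDR_ANY clears the configured static address.
 */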
6511 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6512 			      void *data, u16 len)
6513 {
6514 	struct mgmt_cp_set_static_address *cp = data;
6515 	int err;
6516 
6517 	bt_dev_dbg(hdev, "sock %p", sk);
6518 
6519 	if (!lmp_le_capable(hdev))
6520 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6521 				       MGMT_STATUS_NOT_SUPPORTED);
6522 
6523 	if (hdev_is_powered(hdev))
6524 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6525 				       MGMT_STATUS_REJECTED);
6526 
6527 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6528 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6529 			return mgmt_cmd_status(sk, hdev->id,
6530 					       MGMT_OP_SET_STATIC_ADDRESS,
6531 					       MGMT_STATUS_INVALID_PARAMS);
6532 
6533 		/* Two most significant bits shall be set */
6534 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6535 			return mgmt_cmd_status(sk, hdev->id,
6536 					       MGMT_OP_SET_STATIC_ADDRESS,
6537 					       MGMT_STATUS_INVALID_PARAMS);
6538 	}
6539 
6540 	hci_dev_lock(hdev);
6541 
6542 	bacpy(&hdev->static_addr, &cp->bdaddr);
6543 
6544 	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6545 	if (err < 0)
6546 		goto unlock;
6547 
6548 	err = new_settings(hdev, sk);
6549 
6550 unlock:
6551 	hci_dev_unlock(hdev);
6552 	return err;
6553 }
6554 
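/* Set Scan Parameters: interval and window are in units of 0.625 ms and
 * must lie within 0x0004-0x4000 (2.5 ms to 10.24 s), with the window no
 * larger than the interval. A running background scan is restarted so
 * the new values take effect.
 */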
6555 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6556 			   void *data, u16 len)
6557 {
6558 	struct mgmt_cp_set_scan_params *cp = data;
6559 	__u16 interval, window;
6560 	int err;
6561 
6562 	bt_dev_dbg(hdev, "sock %p", sk);
6563 
6564 	if (!lmp_le_capable(hdev))
6565 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6566 				       MGMT_STATUS_NOT_SUPPORTED);
6567 
6568 	/* Keep allowed ranges in sync with set_mesh() */
6569 	interval = __le16_to_cpu(cp->interval);
6570 
6571 	if (interval < 0x0004 || interval > 0x4000)
6572 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6573 				       MGMT_STATUS_INVALID_PARAMS);
6574 
6575 	window = __le16_to_cpu(cp->window);
6576 
6577 	if (window < 0x0004 || window > 0x4000)
6578 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6579 				       MGMT_STATUS_INVALID_PARAMS);
6580 
6581 	if (window > interval)
6582 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6583 				       MGMT_STATUS_INVALID_PARAMS);
6584 
6585 	hci_dev_lock(hdev);
6586 
6587 	hdev->le_scan_interval = interval;
6588 	hdev->le_scan_window = window;
6589 
6590 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6591 				NULL, 0);
6592 
6593 	/* If background scan is running, restart it so new parameters are
6594 	 * loaded.
6595 	 */
6596 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6597 	    hdev->discovery.state == DISCOVERY_STOPPED)
6598 		hci_update_passive_scan(hdev);
6599 
6600 	hci_dev_unlock(hdev);
6601 
6602 	return err;
6603 }
6604 
6605 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6606 {
6607 	struct mgmt_pending_cmd *cmd = data;
6608 
6609 	bt_dev_dbg(hdev, "err %d", err);
6610 
6611 	if (err) {
6612 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6613 				mgmt_status(err));
6614 	} else {
6615 		struct mgmt_mode *cp = cmd->param;
6616 
6617 		if (cp->val)
6618 			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6619 		else
6620 			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6621 
6622 		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6623 		new_settings(hdev, cmd->sk);
6624 	}
6625 
6626 	mgmt_pending_free(cmd);
6627 }
6628 
6629 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6630 {
6631 	struct mgmt_pending_cmd *cmd = data;
6632 	struct mgmt_mode *cp = cmd->param;
6633 
6634 	return hci_write_fast_connectable_sync(hdev, cp->val);
6635 }
6636 
6637 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6638 				void *data, u16 len)
6639 {
6640 	struct mgmt_mode *cp = data;
6641 	struct mgmt_pending_cmd *cmd;
6642 	int err;
6643 
6644 	bt_dev_dbg(hdev, "sock %p", sk);
6645 
6646 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6647 	    hdev->hci_ver < BLUETOOTH_VER_1_2)
6648 		return mgmt_cmd_status(sk, hdev->id,
6649 				       MGMT_OP_SET_FAST_CONNECTABLE,
6650 				       MGMT_STATUS_NOT_SUPPORTED);
6651 
6652 	if (cp->val != 0x00 && cp->val != 0x01)
6653 		return mgmt_cmd_status(sk, hdev->id,
6654 				       MGMT_OP_SET_FAST_CONNECTABLE,
6655 				       MGMT_STATUS_INVALID_PARAMS);
6656 
6657 	hci_dev_lock(hdev);
6658 
6659 	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6660 		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6661 		goto unlock;
6662 	}
6663 
6664 	if (!hdev_is_powered(hdev)) {
6665 		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6666 		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6667 		new_settings(hdev, sk);
6668 		goto unlock;
6669 	}
6670 
6671 	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6672 			       len);
6673 	if (!cmd)
6674 		err = -ENOMEM;
6675 	else
6676 		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6677 					 fast_connectable_complete);
6678 
6679 	if (err < 0) {
6680 		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6681 				MGMT_STATUS_FAILED);
6682 
6683 		if (cmd)
6684 			mgmt_pending_free(cmd);
6685 	}
6686 
6687 unlock:
6688 	hci_dev_unlock(hdev);
6689 
6690 	return err;
6691 }
6692 
6693 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6694 {
6695 	struct mgmt_pending_cmd *cmd = data;
6696 
6697 	bt_dev_dbg(hdev, "err %d", err);
6698 
6699 	if (err) {
6700 		u8 mgmt_err = mgmt_status(err);
6701 
6702 		/* We need to restore the flag if related HCI commands
6703 		 * failed.
6704 		 */
6705 		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6706 
6707 		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
6708 	} else {
6709 		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6710 		new_settings(hdev, cmd->sk);
6711 	}
6712 
6713 	mgmt_pending_free(cmd);
6714 }
6715 
6716 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6717 {
6718 	int status;
6719 
6720 	status = hci_write_fast_connectable_sync(hdev, false);
6721 
6722 	if (!status)
6723 		status = hci_update_scan_sync(hdev);
6724 
6725 	/* Since only the advertising data flags will change, there
6726 	 * is no need to update the scan response data.
6727 	 */
6728 	if (!status)
6729 		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6730 
6731 	return status;
6732 }
6733 
6734 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6735 {
6736 	struct mgmt_mode *cp = data;
6737 	struct mgmt_pending_cmd *cmd;
6738 	int err;
6739 
6740 	bt_dev_dbg(hdev, "sock %p", sk);
6741 
6742 	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6743 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6744 				       MGMT_STATUS_NOT_SUPPORTED);
6745 
6746 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6747 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6748 				       MGMT_STATUS_REJECTED);
6749 
6750 	if (cp->val != 0x00 && cp->val != 0x01)
6751 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6752 				       MGMT_STATUS_INVALID_PARAMS);
6753 
6754 	hci_dev_lock(hdev);
6755 
6756 	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6757 		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6758 		goto unlock;
6759 	}
6760 
6761 	if (!hdev_is_powered(hdev)) {
6762 		if (!cp->val) {
6763 			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6764 			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6765 			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6766 			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6767 		}
6768 
6769 		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6770 
6771 		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6772 		if (err < 0)
6773 			goto unlock;
6774 
6775 		err = new_settings(hdev, sk);
6776 		goto unlock;
6777 	}
6778 
6779 	/* Reject disabling when powered on */
6780 	if (!cp->val) {
6781 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6782 				      MGMT_STATUS_REJECTED);
6783 		goto unlock;
6784 	} else {
6785 		/* When configuring a dual-mode controller to operate
6786 		 * with LE only and using a static address, then switching
6787 		 * BR/EDR back on is not allowed.
6788 		 *
6789 		 * Dual-mode controllers shall operate with the public
6790 		 * address as their identity address for BR/EDR and LE. So
6791 		 * reject the attempt to create an invalid configuration.
6792 		 *
6793 		 * The same restriction applies when Secure Connections
6794 		 * has been enabled. For BR/EDR this is a controller feature
6795 		 * while for LE it is a host stack feature. This means that
6796 		 * switching BR/EDR back on when Secure Connections has been
6797 		 * enabled is not a supported transaction.
6798 		 */
6799 		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6800 		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6801 		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6802 			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6803 					      MGMT_STATUS_REJECTED);
6804 			goto unlock;
6805 		}
6806 	}
6807 
6808 	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6809 	if (!cmd)
6810 		err = -ENOMEM;
6811 	else
6812 		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6813 					 set_bredr_complete);
6814 
6815 	if (err < 0) {
6816 		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6817 				MGMT_STATUS_FAILED);
6818 		if (cmd)
6819 			mgmt_pending_free(cmd);
6820 
6821 		goto unlock;
6822 	}
6823 
6824 	/* We need to flip the bit already here so that
6825 	 * hci_req_update_adv_data generates the correct flags.
6826 	 */
6827 	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6828 
6829 unlock:
6830 	hci_dev_unlock(hdev);
6831 	return err;
6832 }
6833 
6834 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6835 {
6836 	struct mgmt_pending_cmd *cmd = data;
6837 	struct mgmt_mode *cp;
6838 
6839 	bt_dev_dbg(hdev, "err %d", err);
6840 
6841 	if (err) {
6842 		u8 mgmt_err = mgmt_status(err);
6843 
6844 		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
6845 		goto done;
6846 	}
6847 
6848 	cp = cmd->param;
6849 
6850 	switch (cp->val) {
6851 	case 0x00:
6852 		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6853 		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6854 		break;
6855 	case 0x01:
6856 		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6857 		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6858 		break;
6859 	case 0x02:
6860 		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6861 		hci_dev_set_flag(hdev, HCI_SC_ONLY);
6862 		break;
6863 	}
6864 
6865 	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6866 	new_settings(hdev, cmd->sk);
6867 
6868 done:
6869 	mgmt_pending_free(cmd);
6870 }
6871 
6872 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6873 {
6874 	struct mgmt_pending_cmd *cmd = data;
6875 	struct mgmt_mode *cp = cmd->param;
6876 	u8 val = !!cp->val;
6877 
6878 	/* Force write of val */
6879 	hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6880 
6881 	return hci_write_sc_support_sync(hdev, val);
6882 }
6883 
6884 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6885 			   void *data, u16 len)
6886 {
6887 	struct mgmt_mode *cp = data;
6888 	struct mgmt_pending_cmd *cmd;
6889 	u8 val;
6890 	int err;
6891 
6892 	bt_dev_dbg(hdev, "sock %p", sk);
6893 
6894 	if (!lmp_sc_capable(hdev) &&
6895 	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6896 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6897 				       MGMT_STATUS_NOT_SUPPORTED);
6898 
6899 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6900 	    lmp_sc_capable(hdev) &&
6901 	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6902 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6903 				       MGMT_STATUS_REJECTED);
6904 
6905 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6906 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6907 				       MGMT_STATUS_INVALID_PARAMS);
6908 
6909 	hci_dev_lock(hdev);
6910 
6911 	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6912 	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6913 		bool changed;
6914 
6915 		if (cp->val) {
6916 			changed = !hci_dev_test_and_set_flag(hdev,
6917 							     HCI_SC_ENABLED);
6918 			if (cp->val == 0x02)
6919 				hci_dev_set_flag(hdev, HCI_SC_ONLY);
6920 			else
6921 				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6922 		} else {
6923 			changed = hci_dev_test_and_clear_flag(hdev,
6924 							      HCI_SC_ENABLED);
6925 			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6926 		}
6927 
6928 		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6929 		if (err < 0)
6930 			goto failed;
6931 
6932 		if (changed)
6933 			err = new_settings(hdev, sk);
6934 
6935 		goto failed;
6936 	}
6937 
6938 	val = !!cp->val;
6939 
6940 	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6941 	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6942 		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6943 		goto failed;
6944 	}
6945 
6946 	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6947 	if (!cmd)
6948 		err = -ENOMEM;
6949 	else
6950 		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6951 					 set_secure_conn_complete);
6952 
6953 	if (err < 0) {
6954 		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6955 				MGMT_STATUS_FAILED);
6956 		if (cmd)
6957 			mgmt_pending_free(cmd);
6958 	}
6959 
6960 failed:
6961 	hci_dev_unlock(hdev);
6962 	return err;
6963 }
6964 
6965 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6966 			  void *data, u16 len)
6967 {
6968 	struct mgmt_mode *cp = data;
6969 	bool changed, use_changed;
6970 	int err;
6971 
6972 	bt_dev_dbg(hdev, "sock %p", sk);
6973 
6974 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6975 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6976 				       MGMT_STATUS_INVALID_PARAMS);
6977 
6978 	hci_dev_lock(hdev);
6979 
6980 	if (cp->val)
6981 		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6982 	else
6983 		changed = hci_dev_test_and_clear_flag(hdev,
6984 						      HCI_KEEP_DEBUG_KEYS);
6985 
6986 	if (cp->val == 0x02)
6987 		use_changed = !hci_dev_test_and_set_flag(hdev,
6988 							 HCI_USE_DEBUG_KEYS);
6989 	else
6990 		use_changed = hci_dev_test_and_clear_flag(hdev,
6991 							  HCI_USE_DEBUG_KEYS);
6992 
6993 	if (hdev_is_powered(hdev) && use_changed &&
6994 	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6995 		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;

6996 		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6997 			     sizeof(mode), &mode);
6998 	}
6999 
7000 	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
7001 	if (err < 0)
7002 		goto unlock;
7003 
7004 	if (changed)
7005 		err = new_settings(hdev, sk);
7006 
7007 unlock:
7008 	hci_dev_unlock(hdev);
7009 	return err;
7010 }
7011 
7012 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7013 		       u16 len)
7014 {
7015 	struct mgmt_cp_set_privacy *cp = cp_data;
7016 	bool changed;
7017 	int err;
7018 
7019 	bt_dev_dbg(hdev, "sock %p", sk);
7020 
7021 	if (!lmp_le_capable(hdev))
7022 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7023 				       MGMT_STATUS_NOT_SUPPORTED);
7024 
7025 	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
7026 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7027 				       MGMT_STATUS_INVALID_PARAMS);
7028 
7029 	if (hdev_is_powered(hdev))
7030 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7031 				       MGMT_STATUS_REJECTED);
7032 
7033 	hci_dev_lock(hdev);
7034 
7035 	/* If user space supports this command it is also expected to
7036 	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
7037 	 */
7038 	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7039 
7040 	if (cp->privacy) {
7041 		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
7042 		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
7043 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
7044 		hci_adv_instances_set_rpa_expired(hdev, true);
7045 		if (cp->privacy == 0x02)
7046 			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
7047 		else
7048 			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7049 	} else {
7050 		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
7051 		memset(hdev->irk, 0, sizeof(hdev->irk));
7052 		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
7053 		hci_adv_instances_set_rpa_expired(hdev, false);
7054 		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7055 	}
7056 
7057 	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
7058 	if (err < 0)
7059 		goto unlock;
7060 
7061 	if (changed)
7062 		err = new_settings(hdev, sk);
7063 
7064 unlock:
7065 	hci_dev_unlock(hdev);
7066 	return err;
7067 }
7068 
7069 static bool irk_is_valid(struct mgmt_irk_info *irk)
7070 {
7071 	switch (irk->addr.type) {
7072 	case BDADDR_LE_PUBLIC:
7073 		return true;
7074 
7075 	case BDADDR_LE_RANDOM:
7076 		/* Two most significant bits shall be set */
7077 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7078 			return false;
7079 		return true;
7080 	}
7081 
7082 	return false;
7083 }
7084 
7085 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7086 		     u16 len)
7087 {
7088 	struct mgmt_cp_load_irks *cp = cp_data;
7089 	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7090 				   sizeof(struct mgmt_irk_info));
7091 	u16 irk_count, expected_len;
7092 	int i, err;
7093 
7094 	bt_dev_dbg(hdev, "sock %p", sk);
7095 
7096 	if (!lmp_le_capable(hdev))
7097 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7098 				       MGMT_STATUS_NOT_SUPPORTED);
7099 
7100 	irk_count = __le16_to_cpu(cp->irk_count);
7101 	if (irk_count > max_irk_count) {
7102 		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7103 			   irk_count);
7104 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7105 				       MGMT_STATUS_INVALID_PARAMS);
7106 	}
7107 
7108 	expected_len = struct_size(cp, irks, irk_count);
7109 	if (expected_len != len) {
7110 		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7111 			   expected_len, len);
7112 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7113 				       MGMT_STATUS_INVALID_PARAMS);
7114 	}
7115 
7116 	bt_dev_dbg(hdev, "irk_count %u", irk_count);
7117 
7118 	for (i = 0; i < irk_count; i++) {
7119 		struct mgmt_irk_info *key = &cp->irks[i];
7120 
7121 		if (!irk_is_valid(key))
7122 			return mgmt_cmd_status(sk, hdev->id,
7123 					       MGMT_OP_LOAD_IRKS,
7124 					       MGMT_STATUS_INVALID_PARAMS);
7125 	}
7126 
7127 	hci_dev_lock(hdev);
7128 
7129 	hci_smp_irks_clear(hdev);
7130 
7131 	for (i = 0; i < irk_count; i++) {
7132 		struct mgmt_irk_info *irk = &cp->irks[i];
7133 
7134 		if (hci_is_blocked_key(hdev,
7135 				       HCI_BLOCKED_KEY_TYPE_IRK,
7136 				       irk->val)) {
7137 			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7138 				    &irk->addr.bdaddr);
7139 			continue;
7140 		}
7141 
7142 		hci_add_irk(hdev, &irk->addr.bdaddr,
7143 			    le_addr_type(irk->addr.type), irk->val,
7144 			    BDADDR_ANY);
7145 	}
7146 
7147 	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7148 
7149 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7150 
7151 	hci_dev_unlock(hdev);
7152 
7153 	return err;
7154 }
7155 
7156 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7157 {
7158 	if (key->initiator != 0x00 && key->initiator != 0x01)
7159 		return false;
7160 
7161 	switch (key->addr.type) {
7162 	case BDADDR_LE_PUBLIC:
7163 		return true;
7164 
7165 	case BDADDR_LE_RANDOM:
7166 		/* Two most significant bits shall be set */
7167 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7168 			return false;
7169 		return true;
7170 	}
7171 
7172 	return false;
7173 }
7174 
7175 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7176 			       void *cp_data, u16 len)
7177 {
7178 	struct mgmt_cp_load_long_term_keys *cp = cp_data;
7179 	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7180 				   sizeof(struct mgmt_ltk_info));
7181 	u16 key_count, expected_len;
7182 	int i, err;
7183 
7184 	bt_dev_dbg(hdev, "sock %p", sk);
7185 
7186 	if (!lmp_le_capable(hdev))
7187 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7188 				       MGMT_STATUS_NOT_SUPPORTED);
7189 
7190 	key_count = __le16_to_cpu(cp->key_count);
7191 	if (key_count > max_key_count) {
7192 		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7193 			   key_count);
7194 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7195 				       MGMT_STATUS_INVALID_PARAMS);
7196 	}
7197 
7198 	expected_len = struct_size(cp, keys, key_count);
7199 	if (expected_len != len) {
7200 		bt_dev_err(hdev, "load_ltks: expected %u bytes, got %u bytes",
7201 			   expected_len, len);
7202 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7203 				       MGMT_STATUS_INVALID_PARAMS);
7204 	}
7205 
7206 	bt_dev_dbg(hdev, "key_count %u", key_count);
7207 
7208 	hci_dev_lock(hdev);
7209 
7210 	hci_smp_ltks_clear(hdev);
7211 
7212 	for (i = 0; i < key_count; i++) {
7213 		struct mgmt_ltk_info *key = &cp->keys[i];
7214 		u8 type, authenticated;
7215 
7216 		if (hci_is_blocked_key(hdev,
7217 				       HCI_BLOCKED_KEY_TYPE_LTK,
7218 				       key->val)) {
7219 			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
7220 				    &key->addr.bdaddr);
7221 			continue;
7222 		}
7223 
7224 		if (!ltk_is_valid(key)) {
7225 			bt_dev_warn(hdev, "Invalid LTK for %pMR",
7226 				    &key->addr.bdaddr);
7227 			continue;
7228 		}
7229 
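		/* Map the mgmt key type to the SMP key type. Legacy keys keep
		 * the initiator/responder role, P-256 keys use a single type,
		 * and P-256 debug keys deliberately fall through to the
		 * default case so they are never stored.
		 */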
7230 		switch (key->type) {
7231 		case MGMT_LTK_UNAUTHENTICATED:
7232 			authenticated = 0x00;
7233 			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7234 			break;
7235 		case MGMT_LTK_AUTHENTICATED:
7236 			authenticated = 0x01;
7237 			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7238 			break;
7239 		case MGMT_LTK_P256_UNAUTH:
7240 			authenticated = 0x00;
7241 			type = SMP_LTK_P256;
7242 			break;
7243 		case MGMT_LTK_P256_AUTH:
7244 			authenticated = 0x01;
7245 			type = SMP_LTK_P256;
7246 			break;
7247 		case MGMT_LTK_P256_DEBUG:
7248 			authenticated = 0x00;
7249 			type = SMP_LTK_P256_DEBUG;
7250 			fallthrough;
7251 		default:
7252 			continue;
7253 		}
7254 
7255 		hci_add_ltk(hdev, &key->addr.bdaddr,
7256 			    le_addr_type(key->addr.type), type, authenticated,
7257 			    key->val, key->enc_size, key->ediv, key->rand);
7258 	}
7259 
7260 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7261 				NULL, 0);
7262 
7263 	hci_dev_unlock(hdev);
7264 
7265 	return err;
7266 }
7267 
7268 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7269 {
7270 	struct mgmt_pending_cmd *cmd = data;
7271 	struct hci_conn *conn = cmd->user_data;
7272 	struct mgmt_cp_get_conn_info *cp = cmd->param;
7273 	struct mgmt_rp_get_conn_info rp;
7274 	u8 status;
7275 
7276 	bt_dev_dbg(hdev, "err %d", err);
7277 
7278 	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7279 
7280 	status = mgmt_status(err);
7281 	if (status == MGMT_STATUS_SUCCESS) {
7282 		rp.rssi = conn->rssi;
7283 		rp.tx_power = conn->tx_power;
7284 		rp.max_tx_power = conn->max_tx_power;
7285 	} else {
7286 		rp.rssi = HCI_RSSI_INVALID;
7287 		rp.tx_power = HCI_TX_POWER_INVALID;
7288 		rp.max_tx_power = HCI_TX_POWER_INVALID;
7289 	}
7290 
7291 	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_GET_CONN_INFO, status,
7292 			  &rp, sizeof(rp));
7293 
7294 	mgmt_pending_free(cmd);
7295 }
7296 
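/* Synchronous worker for Get Connection Information: re-check that the
 * connection still exists, then refresh the RSSI. The current TX power
 * (0x00) is only queried for BR/EDR links or while it is still unknown,
 * and the maximum TX power (0x01) is read once per connection.
 */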
7297 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7298 {
7299 	struct mgmt_pending_cmd *cmd = data;
7300 	struct mgmt_cp_get_conn_info *cp = cmd->param;
7301 	struct hci_conn *conn;
7302 	int err;
7303 	__le16   handle;
7304 	__le16 handle;
7305 	/* Make sure we are still connected */
7306 	if (cp->addr.type == BDADDR_BREDR)
7307 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7308 					       &cp->addr.bdaddr);
7309 	else
7310 		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7311 
7312 	if (!conn || conn->state != BT_CONNECTED)
7313 		return MGMT_STATUS_NOT_CONNECTED;
7314 
7315 	cmd->user_data = conn;
7316 	handle = cpu_to_le16(conn->handle);
7317 
7318 	/* Refresh RSSI each time */
7319 	err = hci_read_rssi_sync(hdev, handle);
7320 
7321 	/* For LE links the TX power does not change, so there is no need
7322 	 * to query it again once the value is known.
7323 	 */
7324 	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7325 		     conn->tx_power == HCI_TX_POWER_INVALID))
7326 		err = hci_read_tx_power_sync(hdev, handle, 0x00);
7327 
7328 	/* Max TX power needs to be read only once per connection */
7329 	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7330 		err = hci_read_tx_power_sync(hdev, handle, 0x01);
7331 
7332 	return err;
7333 }
7334 
7335 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7336 			 u16 len)
7337 {
7338 	struct mgmt_cp_get_conn_info *cp = data;
7339 	struct mgmt_rp_get_conn_info rp;
7340 	struct hci_conn *conn;
7341 	unsigned long conn_info_age;
7342 	int err = 0;
7343 
7344 	bt_dev_dbg(hdev, "sock %p", sk);
7345 
7346 	memset(&rp, 0, sizeof(rp));
7347 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7348 	rp.addr.type = cp->addr.type;
7349 
7350 	if (!bdaddr_type_is_valid(cp->addr.type))
7351 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7352 					 MGMT_STATUS_INVALID_PARAMS,
7353 					 &rp, sizeof(rp));
7354 
7355 	hci_dev_lock(hdev);
7356 
7357 	if (!hdev_is_powered(hdev)) {
7358 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7359 					MGMT_STATUS_NOT_POWERED, &rp,
7360 					sizeof(rp));
7361 		goto unlock;
7362 	}
7363 
7364 	if (cp->addr.type == BDADDR_BREDR)
7365 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7366 					       &cp->addr.bdaddr);
7367 	else
7368 		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7369 
7370 	if (!conn || conn->state != BT_CONNECTED) {
7371 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7372 					MGMT_STATUS_NOT_CONNECTED, &rp,
7373 					sizeof(rp));
7374 		goto unlock;
7375 	}
7376 
7377 	/* To keep the client from guessing when to poll again, calculate the
7378 	 * conn info age as a random value between the min/max set in hdev.
7379 	 */
7380 	conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
7381 						 hdev->conn_info_max_age - 1);
7382 
7383 	/* Query controller to refresh cached values if they are too old or were
7384 	 * never read.
7385 	 */
7386 	if (time_after(jiffies, conn->conn_info_timestamp +
7387 		       msecs_to_jiffies(conn_info_age)) ||
7388 	    !conn->conn_info_timestamp) {
7389 		struct mgmt_pending_cmd *cmd;
7390 
7391 		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
7392 				       len);
7393 		if (!cmd) {
7394 			err = -ENOMEM;
7395 		} else {
7396 			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
7397 						 cmd, get_conn_info_complete);
7398 		}
7399 
7400 		if (err < 0) {
7401 			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7402 					  MGMT_STATUS_FAILED, &rp, sizeof(rp));
7403 
7404 			if (cmd)
7405 				mgmt_pending_free(cmd);
7406 
7407 			goto unlock;
7408 		}
7409 
7410 		conn->conn_info_timestamp = jiffies;
7411 	} else {
7412 		/* Cache is valid, just reply with values cached in hci_conn */
7413 		rp.rssi = conn->rssi;
7414 		rp.tx_power = conn->tx_power;
7415 		rp.max_tx_power = conn->max_tx_power;
7416 
7417 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7418 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7419 	}
7420 
7421 unlock:
7422 	hci_dev_unlock(hdev);
7423 	return err;
7424 }
7425 
7426 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7427 {
7428 	struct mgmt_pending_cmd *cmd = data;
7429 	struct mgmt_cp_get_clock_info *cp = cmd->param;
7430 	struct mgmt_rp_get_clock_info rp;
7431 	struct hci_conn *conn = cmd->user_data;
7432 	u8 status = mgmt_status(err);
7433 
7434 	bt_dev_dbg(hdev, "err %d", err);
7435 
7436 	memset(&rp, 0, sizeof(rp));
7437 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7438 	rp.addr.type = cp->addr.type;
7439 
7440 	if (err)
7441 		goto complete;
7442 
7443 	rp.local_clock = cpu_to_le32(hdev->clock);
7444 
7445 	if (conn) {
7446 		rp.piconet_clock = cpu_to_le32(conn->clock);
7447 		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7448 	}
7449 
7450 complete:
7451 	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status, &rp,
7452 			  sizeof(rp));
7453 
7454 	mgmt_pending_free(cmd);
7455 }
7456 
7457 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
7458 {
7459 	struct mgmt_pending_cmd *cmd = data;
7460 	struct mgmt_cp_get_clock_info *cp = cmd->param;
7461 	struct hci_cp_read_clock hci_cp;
7462 	struct hci_conn *conn;
7463 
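	/* A zeroed command reads the local clock (which = 0x00) */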
7464 	memset(&hci_cp, 0, sizeof(hci_cp));
7465 	hci_read_clock_sync(hdev, &hci_cp);
7466 
7467 	/* Make sure connection still exists */
7468 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7469 	if (!conn || conn->state != BT_CONNECTED)
7470 		return MGMT_STATUS_NOT_CONNECTED;
7471 
7472 	cmd->user_data = conn;
7473 	hci_cp.handle = cpu_to_le16(conn->handle);
7474 	hci_cp.which = 0x01; /* Piconet clock */
7475 
7476 	return hci_read_clock_sync(hdev, &hci_cp);
7477 }
7478 
7479 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7480 								u16 len)
7481 {
7482 	struct mgmt_cp_get_clock_info *cp = data;
7483 	struct mgmt_rp_get_clock_info rp;
7484 	struct mgmt_pending_cmd *cmd;
7485 	struct hci_conn *conn;
7486 	int err;
7487 
7488 	bt_dev_dbg(hdev, "sock %p", sk);
7489 
7490 	memset(&rp, 0, sizeof(rp));
7491 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7492 	rp.addr.type = cp->addr.type;
7493 
7494 	if (cp->addr.type != BDADDR_BREDR)
7495 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7496 					 MGMT_STATUS_INVALID_PARAMS,
7497 					 &rp, sizeof(rp));
7498 
7499 	hci_dev_lock(hdev);
7500 
7501 	if (!hdev_is_powered(hdev)) {
7502 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7503 					MGMT_STATUS_NOT_POWERED, &rp,
7504 					sizeof(rp));
7505 		goto unlock;
7506 	}
7507 
7508 	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7509 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7510 					       &cp->addr.bdaddr);
7511 		if (!conn || conn->state != BT_CONNECTED) {
7512 			err = mgmt_cmd_complete(sk, hdev->id,
7513 						MGMT_OP_GET_CLOCK_INFO,
7514 						MGMT_STATUS_NOT_CONNECTED,
7515 						&rp, sizeof(rp));
7516 			goto unlock;
7517 		}
7518 	} else {
7519 		conn = NULL;
7520 	}
7521 
7522 	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7523 	if (!cmd)
7524 		err = -ENOMEM;
7525 	else
7526 		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7527 					 get_clock_info_complete);
7528 
7529 	if (err < 0) {
7530 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7531 					MGMT_STATUS_FAILED, &rp, sizeof(rp));
7532 
7533 		if (cmd)
7534 			mgmt_pending_free(cmd);
7535 	}
7536 
7538 unlock:
7539 	hci_dev_unlock(hdev);
7540 	return err;
7541 }
7542 
7543 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7544 {
7545 	struct hci_conn *conn;
7546 
7547 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7548 	if (!conn)
7549 		return false;
7550 
7551 	if (conn->dst_type != type)
7552 		return false;
7553 
7554 	if (conn->state != BT_CONNECTED)
7555 		return false;
7556 
7557 	return true;
7558 }
7559 
7560 /* This function requires the caller holds hdev->lock */
7561 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7562 			       u8 addr_type, u8 auto_connect)
7563 {
7564 	struct hci_conn_params *params;
7565 
7566 	params = hci_conn_params_add(hdev, addr, addr_type);
7567 	if (!params)
7568 		return -EIO;
7569 
7570 	if (params->auto_connect == auto_connect)
7571 		return 0;
7572 
7573 	hci_pend_le_list_del_init(params);
7574 
7575 	switch (auto_connect) {
7576 	case HCI_AUTO_CONN_DISABLED:
7577 	case HCI_AUTO_CONN_LINK_LOSS:
7578 		/* If auto connect is being disabled while we're trying to
7579 		 * connect to a device, keep connecting.
7580 		 */
7581 		if (params->explicit_connect)
7582 			hci_pend_le_list_add(params, &hdev->pend_le_conns);
7583 		break;
7584 	case HCI_AUTO_CONN_REPORT:
7585 		if (params->explicit_connect)
7586 			hci_pend_le_list_add(params, &hdev->pend_le_conns);
7587 		else
7588 			hci_pend_le_list_add(params, &hdev->pend_le_reports);
7589 		break;
7590 	case HCI_AUTO_CONN_DIRECT:
7591 	case HCI_AUTO_CONN_ALWAYS:
7592 		if (!is_connected(hdev, addr, addr_type))
7593 			hci_pend_le_list_add(params, &hdev->pend_le_conns);
7594 		break;
7595 	}
7596 
7597 	params->auto_connect = auto_connect;
7598 
7599 	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7600 		   addr, addr_type, auto_connect);
7601 
7602 	return 0;
7603 }
7604 
7605 static void device_added(struct sock *sk, struct hci_dev *hdev,
7606 			 bdaddr_t *bdaddr, u8 type, u8 action)
7607 {
7608 	struct mgmt_ev_device_added ev;
7609 
7610 	bacpy(&ev.addr.bdaddr, bdaddr);
7611 	ev.addr.type = type;
7612 	ev.action = action;
7613 
7614 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7615 }
7616 
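/* Completion handler for Add Device: on success, emit the Device Added
 * and Device Flags Changed events before replying to the command.
 */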
7617 static void add_device_complete(struct hci_dev *hdev, void *data, int err)
7618 {
7619 	struct mgmt_pending_cmd *cmd = data;
7620 	struct mgmt_cp_add_device *cp = cmd->param;
7621 
7622 	if (!err) {
7623 		struct hci_conn_params *params;
7624 
7625 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7626 						le_addr_type(cp->addr.type));
7627 
7628 		device_added(cmd->sk, hdev, &cp->addr.bdaddr, cp->addr.type,
7629 			     cp->action);
7630 		device_flags_changed(NULL, hdev, &cp->addr.bdaddr,
7631 				     cp->addr.type, hdev->conn_flags,
7632 				     params ? params->flags : 0);
7633 	}
7634 
7635 	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_ADD_DEVICE,
7636 			  mgmt_status(err), &cp->addr, sizeof(cp->addr));
7637 	mgmt_pending_free(cmd);
7638 }
7639 
7640 static int add_device_sync(struct hci_dev *hdev, void *data)
7641 {
7642 	return hci_update_passive_scan_sync(hdev);
7643 }
7644 
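/* Add Device (MGMT_OP_ADD_DEVICE) handler: BR/EDR entries go straight
 * onto the accept list, while LE entries (identity addresses only) are
 * stored as conn_params with an auto-connect policy derived from
 * cp->action (0x00 report, 0x01 direct, 0x02 always).
 */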
7645 static int add_device(struct sock *sk, struct hci_dev *hdev,
7646 		      void *data, u16 len)
7647 {
7648 	struct mgmt_pending_cmd *cmd;
7649 	struct mgmt_cp_add_device *cp = data;
7650 	u8 auto_conn, addr_type;
7651 	struct hci_conn_params *params;
7652 	int err;
7653 	u32 current_flags = 0;
7654 	u32 supported_flags;
7655 
7656 	bt_dev_dbg(hdev, "sock %p", sk);
7657 
7658 	if (!bdaddr_type_is_valid(cp->addr.type) ||
7659 	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7660 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7661 					 MGMT_STATUS_INVALID_PARAMS,
7662 					 &cp->addr, sizeof(cp->addr));
7663 
7664 	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7665 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7666 					 MGMT_STATUS_INVALID_PARAMS,
7667 					 &cp->addr, sizeof(cp->addr));
7668 
7669 	hci_dev_lock(hdev);
7670 
7671 	if (cp->addr.type == BDADDR_BREDR) {
7672 		/* Only the incoming connections action is supported for now */
7673 		if (cp->action != 0x01) {
7674 			err = mgmt_cmd_complete(sk, hdev->id,
7675 						MGMT_OP_ADD_DEVICE,
7676 						MGMT_STATUS_INVALID_PARAMS,
7677 						&cp->addr, sizeof(cp->addr));
7678 			goto unlock;
7679 		}
7680 
7681 		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
7682 						     &cp->addr.bdaddr,
7683 						     cp->addr.type, 0);
7684 		if (err)
7685 			goto unlock;
7686 
7687 		hci_update_scan(hdev);
7688 
7689 		goto added;
7690 	}
7691 
7692 	addr_type = le_addr_type(cp->addr.type);
7693 
7694 	if (cp->action == 0x02)
7695 		auto_conn = HCI_AUTO_CONN_ALWAYS;
7696 	else if (cp->action == 0x01)
7697 		auto_conn = HCI_AUTO_CONN_DIRECT;
7698 	else
7699 		auto_conn = HCI_AUTO_CONN_REPORT;
7700 
7701 	/* The kernel internally uses conn_params with the resolvable
7702 	 * private address, but Add Device allows only identity addresses.
7703 	 * Make sure this is enforced before calling
7704 	 * hci_conn_params_lookup.
7705 	 */
7706 	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7707 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7708 					MGMT_STATUS_INVALID_PARAMS,
7709 					&cp->addr, sizeof(cp->addr));
7710 		goto unlock;
7711 	}
7712 
7713 	/* If the connection parameters don't exist for this device,
7714 	 * they will be created and configured with defaults.
7715 	 */
7716 	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7717 				auto_conn) < 0) {
7718 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7719 					MGMT_STATUS_FAILED, &cp->addr,
7720 					sizeof(cp->addr));
7721 		goto unlock;
7722 	} else {
7723 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7724 						addr_type);
7725 		if (params)
7726 			current_flags = params->flags;
7727 	}
7728 
7729 	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
7730 	if (!cmd) {
7731 		err = -ENOMEM;
7732 		goto unlock;
7733 	}
7734 
7735 	err = hci_cmd_sync_queue(hdev, add_device_sync, cmd,
7736 				 add_device_complete);
7737 	if (err < 0) {
7738 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7739 					MGMT_STATUS_FAILED, &cp->addr,
7740 					sizeof(cp->addr));
7741 		mgmt_pending_free(cmd);
7742 	}
7743 
7744 	goto unlock;
7745 
7746 added:
7747 	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7748 	supported_flags = hdev->conn_flags;
7749 	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7750 			     supported_flags, current_flags);
7751 
7752 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7753 				MGMT_STATUS_SUCCESS, &cp->addr,
7754 				sizeof(cp->addr));
7755 
7756 unlock:
7757 	hci_dev_unlock(hdev);
7758 	return err;
7759 }
7760 
7761 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7762 			   bdaddr_t *bdaddr, u8 type)
7763 {
7764 	struct mgmt_ev_device_removed ev;
7765 
7766 	bacpy(&ev.addr.bdaddr, bdaddr);
7767 	ev.addr.type = type;
7768 
7769 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7770 }
7771 
7772 static int remove_device_sync(struct hci_dev *hdev, void *data)
7773 {
7774 	return hci_update_passive_scan_sync(hdev);
7775 }
7776 
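/* Remove Device (MGMT_OP_REMOVE_DEVICE) handler: a specific address
 * removes a single accept list or conn_params entry, while BDADDR_ANY
 * clears the whole accept list and every non-disabled LE connection
 * parameter entry.
 */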
7777 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7778 			 void *data, u16 len)
7779 {
7780 	struct mgmt_cp_remove_device *cp = data;
7781 	int err;
7782 
7783 	bt_dev_dbg(hdev, "sock %p", sk);
7784 
7785 	hci_dev_lock(hdev);
7786 
7787 	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7788 		struct hci_conn_params *params;
7789 		u8 addr_type;
7790 
7791 		if (!bdaddr_type_is_valid(cp->addr.type)) {
7792 			err = mgmt_cmd_complete(sk, hdev->id,
7793 						MGMT_OP_REMOVE_DEVICE,
7794 						MGMT_STATUS_INVALID_PARAMS,
7795 						&cp->addr, sizeof(cp->addr));
7796 			goto unlock;
7797 		}
7798 
7799 		if (cp->addr.type == BDADDR_BREDR) {
7800 			err = hci_bdaddr_list_del(&hdev->accept_list,
7801 						  &cp->addr.bdaddr,
7802 						  cp->addr.type);
7803 			if (err) {
7804 				err = mgmt_cmd_complete(sk, hdev->id,
7805 							MGMT_OP_REMOVE_DEVICE,
7806 							MGMT_STATUS_INVALID_PARAMS,
7807 							&cp->addr,
7808 							sizeof(cp->addr));
7809 				goto unlock;
7810 			}
7811 
7812 			hci_update_scan(hdev);
7813 
7814 			device_removed(sk, hdev, &cp->addr.bdaddr,
7815 				       cp->addr.type);
7816 			goto complete;
7817 		}
7818 
7819 		addr_type = le_addr_type(cp->addr.type);
7820 
7821 		/* The kernel internally uses conn_params with the resolvable
7822 		 * private address, but Remove Device allows only identity
7823 		 * addresses. Make sure this is enforced before calling
7824 		 * hci_conn_params_lookup.
7825 		 */
7826 		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7827 			err = mgmt_cmd_complete(sk, hdev->id,
7828 						MGMT_OP_REMOVE_DEVICE,
7829 						MGMT_STATUS_INVALID_PARAMS,
7830 						&cp->addr, sizeof(cp->addr));
7831 			goto unlock;
7832 		}
7833 
7834 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7835 						addr_type);
7836 		if (!params) {
7837 			err = mgmt_cmd_complete(sk, hdev->id,
7838 						MGMT_OP_REMOVE_DEVICE,
7839 						MGMT_STATUS_INVALID_PARAMS,
7840 						&cp->addr, sizeof(cp->addr));
7841 			goto unlock;
7842 		}
7843 
7844 		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7845 		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7846 			err = mgmt_cmd_complete(sk, hdev->id,
7847 						MGMT_OP_REMOVE_DEVICE,
7848 						MGMT_STATUS_INVALID_PARAMS,
7849 						&cp->addr, sizeof(cp->addr));
7850 			goto unlock;
7851 		}
7852 
7853 		hci_conn_params_free(params);
7854 
7855 		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
7856 	} else {
7857 		struct hci_conn_params *p, *tmp;
7858 		struct bdaddr_list *b, *btmp;
7859 
7860 		if (cp->addr.type) {
7861 			err = mgmt_cmd_complete(sk, hdev->id,
7862 						MGMT_OP_REMOVE_DEVICE,
7863 						MGMT_STATUS_INVALID_PARAMS,
7864 						&cp->addr, sizeof(cp->addr));
7865 			goto unlock;
7866 		}
7867 
7868 		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7869 			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7870 			list_del(&b->list);
7871 			kfree(b);
7872 		}
7873 
7874 		hci_update_scan(hdev);
7875 
7876 		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7877 			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7878 				continue;
7879 			device_removed(sk, hdev, &p->addr, p->addr_type);
7880 			if (p->explicit_connect) {
7881 				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7882 				continue;
7883 			}
7884 			hci_conn_params_free(p);
7885 		}
7886 
7887 		bt_dev_dbg(hdev, "All LE connection parameters were removed");
7888 	}
7889 
7890 	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7891 
7892 complete:
7893 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7894 				MGMT_STATUS_SUCCESS, &cp->addr,
7895 				sizeof(cp->addr));
7896 unlock:
7897 	hci_dev_unlock(hdev);
7898 	return err;
7899 }
7900 
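/* Run the LE connection update procedure for a connection that is
 * still present; returns -ECANCELED if it has gone away meanwhile.
 */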
7901 static int conn_update_sync(struct hci_dev *hdev, void *data)
7902 {
7903 	struct hci_conn_params *params = data;
7904 	struct hci_conn *conn;
7905 
7906 	conn = hci_conn_hash_lookup_le(hdev, &params->addr, params->addr_type);
7907 	if (!conn)
7908 		return -ECANCELED;
7909 
7910 	return hci_le_conn_update_sync(hdev, conn, params);
7911 }
7912 
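/* Load Connection Parameters (MGMT_OP_LOAD_CONN_PARAM) handler:
 * validate the variable-length list, store each entry, and when a
 * single existing entry is reloaded trigger a connection update for a
 * matching connection on which we are central.
 */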
7913 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7914 			   u16 len)
7915 {
7916 	struct mgmt_cp_load_conn_param *cp = data;
7917 	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7918 				     sizeof(struct mgmt_conn_param));
7919 	u16 param_count, expected_len;
7920 	int i;
7921 
7922 	if (!lmp_le_capable(hdev))
7923 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7924 				       MGMT_STATUS_NOT_SUPPORTED);
7925 
7926 	param_count = __le16_to_cpu(cp->param_count);
7927 	if (param_count > max_param_count) {
7928 		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7929 			   param_count);
7930 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7931 				       MGMT_STATUS_INVALID_PARAMS);
7932 	}
7933 
7934 	expected_len = struct_size(cp, params, param_count);
7935 	if (expected_len != len) {
7936 		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7937 			   expected_len, len);
7938 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7939 				       MGMT_STATUS_INVALID_PARAMS);
7940 	}
7941 
7942 	bt_dev_dbg(hdev, "param_count %u", param_count);
7943 
7944 	hci_dev_lock(hdev);
7945 
7946 	if (param_count > 1)
7947 		hci_conn_params_clear_disabled(hdev);
7948 
7949 	for (i = 0; i < param_count; i++) {
7950 		struct mgmt_conn_param *param = &cp->params[i];
7951 		struct hci_conn_params *hci_param;
7952 		u16 min, max, latency, timeout;
7953 		bool update = false;
7954 		u8 addr_type;
7955 
7956 		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
7957 			   param->addr.type);
7958 
7959 		if (param->addr.type == BDADDR_LE_PUBLIC) {
7960 			addr_type = ADDR_LE_DEV_PUBLIC;
7961 		} else if (param->addr.type == BDADDR_LE_RANDOM) {
7962 			addr_type = ADDR_LE_DEV_RANDOM;
7963 		} else {
7964 			bt_dev_err(hdev, "ignoring invalid connection parameters");
7965 			continue;
7966 		}
7967 
7968 		min = le16_to_cpu(param->min_interval);
7969 		max = le16_to_cpu(param->max_interval);
7970 		latency = le16_to_cpu(param->latency);
7971 		timeout = le16_to_cpu(param->timeout);
7972 
7973 		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7974 			   min, max, latency, timeout);
7975 
7976 		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7977 			bt_dev_err(hdev, "ignoring invalid connection parameters");
7978 			continue;
7979 		}
7980 
7981 		/* Detect when the loading is for an existing parameter, then
7982 		 * attempt to trigger the connection update procedure.
7983 		 */
7984 		if (!i && param_count == 1) {
7985 			hci_param = hci_conn_params_lookup(hdev,
7986 							   &param->addr.bdaddr,
7987 							   addr_type);
7988 			if (hci_param)
7989 				update = true;
7990 			else
7991 				hci_conn_params_clear_disabled(hdev);
7992 		}
7993 
7994 		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
7995 						addr_type);
7996 		if (!hci_param) {
7997 			bt_dev_err(hdev, "failed to add connection parameters");
7998 			continue;
7999 		}
8000 
8001 		hci_param->conn_min_interval = min;
8002 		hci_param->conn_max_interval = max;
8003 		hci_param->conn_latency = latency;
8004 		hci_param->supervision_timeout = timeout;
8005 
8006 		/* Check if we need to trigger a connection update */
8007 		if (update) {
8008 			struct hci_conn *conn;
8009 
8010 			/* Look up an existing connection as central and check
8011 			 * whether the parameters match; if they don't, trigger
8012 			 * a connection update.
8013 			 */
8014 			conn = hci_conn_hash_lookup_le(hdev, &hci_param->addr,
8015 						       addr_type);
8016 			if (conn && conn->role == HCI_ROLE_MASTER &&
8017 			    (conn->le_conn_min_interval != min ||
8018 			     conn->le_conn_max_interval != max ||
8019 			     conn->le_conn_latency != latency ||
8020 			     conn->le_supv_timeout != timeout))
8021 				hci_cmd_sync_queue(hdev, conn_update_sync,
8022 						   hci_param, NULL);
8023 		}
8024 	}
8025 
8026 	hci_dev_unlock(hdev);
8027 
8028 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
8029 				 NULL, 0);
8030 }
8031 
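/* Set External Configuration handler: toggling HCI_EXT_CONFIGURED may
 * move the controller between the configured and unconfigured index
 * lists, which is signalled via Index Removed/Added events.
 */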
8032 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
8033 			       void *data, u16 len)
8034 {
8035 	struct mgmt_cp_set_external_config *cp = data;
8036 	bool changed;
8037 	int err;
8038 
8039 	bt_dev_dbg(hdev, "sock %p", sk);
8040 
8041 	if (hdev_is_powered(hdev))
8042 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8043 				       MGMT_STATUS_REJECTED);
8044 
8045 	if (cp->config != 0x00 && cp->config != 0x01)
8046 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8047 				       MGMT_STATUS_INVALID_PARAMS);
8048 
8049 	if (!hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG))
8050 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8051 				       MGMT_STATUS_NOT_SUPPORTED);
8052 
8053 	hci_dev_lock(hdev);
8054 
8055 	if (cp->config)
8056 		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
8057 	else
8058 		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
8059 
8060 	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
8061 	if (err < 0)
8062 		goto unlock;
8063 
8064 	if (!changed)
8065 		goto unlock;
8066 
8067 	err = new_options(hdev, sk);
8068 
8069 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
8070 		mgmt_index_removed(hdev);
8071 
8072 		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
8073 			hci_dev_set_flag(hdev, HCI_CONFIG);
8074 			hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8075 
8076 			queue_work(hdev->req_workqueue, &hdev->power_on);
8077 		} else {
8078 			set_bit(HCI_RAW, &hdev->flags);
8079 			mgmt_index_added(hdev);
8080 		}
8081 	}
8082 
8083 unlock:
8084 	hci_dev_unlock(hdev);
8085 	return err;
8086 }
8087 
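/* Set Public Address handler: only valid while powered off and when
 * the driver provides a set_bdaddr callback; setting an address on an
 * unconfigured controller may complete its configuration.
 */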
8088 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
8089 			      void *data, u16 len)
8090 {
8091 	struct mgmt_cp_set_public_address *cp = data;
8092 	bool changed;
8093 	int err;
8094 
8095 	bt_dev_dbg(hdev, "sock %p", sk);
8096 
8097 	if (hdev_is_powered(hdev))
8098 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8099 				       MGMT_STATUS_REJECTED);
8100 
8101 	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
8102 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8103 				       MGMT_STATUS_INVALID_PARAMS);
8104 
8105 	if (!hdev->set_bdaddr)
8106 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8107 				       MGMT_STATUS_NOT_SUPPORTED);
8108 
8109 	hci_dev_lock(hdev);
8110 
8111 	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
8112 	bacpy(&hdev->public_addr, &cp->bdaddr);
8113 
8114 	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
8115 	if (err < 0)
8116 		goto unlock;
8117 
8118 	if (!changed)
8119 		goto unlock;
8120 
8121 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
8122 		err = new_options(hdev, sk);
8123 
8124 	if (is_configured(hdev)) {
8125 		mgmt_index_removed(hdev);
8126 
8127 		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
8128 
8129 		hci_dev_set_flag(hdev, HCI_CONFIG);
8130 		hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8131 
8132 		queue_work(hdev->req_workqueue, &hdev->power_on);
8133 	}
8134 
8135 unlock:
8136 	hci_dev_unlock(hdev);
8137 	return err;
8138 }
8139 
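/* Build the Read Local OOB Extended Data reply from the HCI response.
 * Depending on Secure Connections support, the returned EIR carries
 * the C-192/R-192 and/or C-256/R-256 values.
 */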
8140 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
8141 					     int err)
8142 {
8143 	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8144 	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8145 	u8 *h192, *r192, *h256, *r256;
8146 	struct mgmt_pending_cmd *cmd = data;
8147 	struct sk_buff *skb = cmd->skb;
8148 	u8 status = mgmt_status(err);
8149 	u16 eir_len;
8150 
8151 	if (!status) {
8152 		if (!skb)
8153 			status = MGMT_STATUS_FAILED;
8154 		else if (IS_ERR(skb))
8155 			status = mgmt_status(PTR_ERR(skb));
8156 		else
8157 			status = mgmt_status(skb->data[0]);
8158 	}
8159 
8160 	bt_dev_dbg(hdev, "status %u", status);
8161 
8162 	mgmt_cp = cmd->param;
8163 
8164 	if (status) {
8165 		status = mgmt_status(status);
8166 		eir_len = 0;
8167 
8168 		h192 = NULL;
8169 		r192 = NULL;
8170 		h256 = NULL;
8171 		r256 = NULL;
8172 	} else if (!bredr_sc_enabled(hdev)) {
8173 		struct hci_rp_read_local_oob_data *rp;
8174 
8175 		if (skb->len != sizeof(*rp)) {
8176 			status = MGMT_STATUS_FAILED;
8177 			eir_len = 0;
8178 		} else {
8179 			status = MGMT_STATUS_SUCCESS;
8180 			rp = (void *)skb->data;
8181 
8182 			eir_len = 5 + 18 + 18;
8183 			h192 = rp->hash;
8184 			r192 = rp->rand;
8185 			h256 = NULL;
8186 			r256 = NULL;
8187 		}
8188 	} else {
8189 		struct hci_rp_read_local_oob_ext_data *rp;
8190 
8191 		if (skb->len != sizeof(*rp)) {
8192 			status = MGMT_STATUS_FAILED;
8193 			eir_len = 0;
8194 		} else {
8195 			status = MGMT_STATUS_SUCCESS;
8196 			rp = (void *)skb->data;
8197 
8198 			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8199 				eir_len = 5 + 18 + 18;
8200 				h192 = NULL;
8201 				r192 = NULL;
8202 			} else {
8203 				eir_len = 5 + 18 + 18 + 18 + 18;
8204 				h192 = rp->hash192;
8205 				r192 = rp->rand192;
8206 			}
8207 
8208 			h256 = rp->hash256;
8209 			r256 = rp->rand256;
8210 		}
8211 	}
8212 
8213 	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
8214 	if (!mgmt_rp)
8215 		goto done;
8216 
8217 	if (eir_len == 0)
8218 		goto send_rsp;
8219 
8220 	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
8221 				  hdev->dev_class, 3);
8222 
8223 	if (h192 && r192) {
8224 		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8225 					  EIR_SSP_HASH_C192, h192, 16);
8226 		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8227 					  EIR_SSP_RAND_R192, r192, 16);
8228 	}
8229 
8230 	if (h256 && r256) {
8231 		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8232 					  EIR_SSP_HASH_C256, h256, 16);
8233 		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8234 					  EIR_SSP_RAND_R256, r256, 16);
8235 	}
8236 
8237 send_rsp:
8238 	mgmt_rp->type = mgmt_cp->type;
8239 	mgmt_rp->eir_len = cpu_to_le16(eir_len);
8240 
8241 	err = mgmt_cmd_complete(cmd->sk, hdev->id,
8242 				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
8243 				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
8244 	if (err < 0 || status)
8245 		goto done;
8246 
8247 	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
8248 
8249 	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8250 				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
8251 				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
8252 done:
8253 	if (skb && !IS_ERR(skb))
8254 		kfree_skb(skb);
8255 
8256 	kfree(mgmt_rp);
8257 	mgmt_pending_free(cmd);
8258 }
8259 
8260 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8261 				  struct mgmt_cp_read_local_oob_ext_data *cp)
8262 {
8263 	struct mgmt_pending_cmd *cmd;
8264 	int err;
8265 
8266 	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8267 			       cp, sizeof(*cp));
8268 	if (!cmd)
8269 		return -ENOMEM;
8270 
8271 	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8272 				 read_local_oob_ext_data_complete);
8273 
8274 	if (err < 0) {
8275 		mgmt_pending_remove(cmd);
8276 		return err;
8277 	}
8278 
8279 	return 0;
8280 }
8281 
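/* Read Local OOB Extended Data handler: for BR/EDR the OOB values come
 * from the controller via read_local_ssp_oob_req, while for LE the
 * confirm/random values are generated with smp_generate_oob and the
 * address, role and flags fields are appended locally.
 */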
8282 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
8283 				   void *data, u16 data_len)
8284 {
8285 	struct mgmt_cp_read_local_oob_ext_data *cp = data;
8286 	struct mgmt_rp_read_local_oob_ext_data *rp;
8287 	size_t rp_len;
8288 	u16 eir_len;
8289 	u8 status, flags, role, addr[7], hash[16], rand[16];
8290 	int err;
8291 
8292 	bt_dev_dbg(hdev, "sock %p", sk);
8293 
8294 	if (hdev_is_powered(hdev)) {
8295 		switch (cp->type) {
8296 		case BIT(BDADDR_BREDR):
8297 			status = mgmt_bredr_support(hdev);
8298 			if (status)
8299 				eir_len = 0;
8300 			else
8301 				eir_len = 5;
8302 			break;
8303 		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8304 			status = mgmt_le_support(hdev);
8305 			if (status)
8306 				eir_len = 0;
8307 			else
8308 				eir_len = 9 + 3 + 18 + 18 + 3;
8309 			break;
8310 		default:
8311 			status = MGMT_STATUS_INVALID_PARAMS;
8312 			eir_len = 0;
8313 			break;
8314 		}
8315 	} else {
8316 		status = MGMT_STATUS_NOT_POWERED;
8317 		eir_len = 0;
8318 	}
8319 
8320 	rp_len = sizeof(*rp) + eir_len;
8321 	rp = kmalloc(rp_len, GFP_ATOMIC);
8322 	if (!rp)
8323 		return -ENOMEM;
8324 
8325 	if (!status && !lmp_ssp_capable(hdev)) {
8326 		status = MGMT_STATUS_NOT_SUPPORTED;
8327 		eir_len = 0;
8328 	}
8329 
8330 	if (status)
8331 		goto complete;
8332 
8333 	hci_dev_lock(hdev);
8334 
8335 	eir_len = 0;
8336 	switch (cp->type) {
8337 	case BIT(BDADDR_BREDR):
8338 		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8339 			err = read_local_ssp_oob_req(hdev, sk, cp);
8340 			hci_dev_unlock(hdev);
8341 			if (!err)
8342 				goto done;
8343 
8344 			status = MGMT_STATUS_FAILED;
8345 			goto complete;
8346 		} else {
8347 			eir_len = eir_append_data(rp->eir, eir_len,
8348 						  EIR_CLASS_OF_DEV,
8349 						  hdev->dev_class, 3);
8350 		}
8351 		break;
8352 	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8353 		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
8354 		    smp_generate_oob(hdev, hash, rand) < 0) {
8355 			hci_dev_unlock(hdev);
8356 			status = MGMT_STATUS_FAILED;
8357 			goto complete;
8358 		}
8359 
8360 		/* This should return the active RPA, but since the RPA
8361 		 * is only programmed on demand, it is really hard to fill
8362 		 * this in at the moment. For now disallow retrieving
8363 		 * local out-of-band data when privacy is in use.
8364 		 *
8365 		 * Returning the identity address will not help here since
8366 		 * pairing happens before the identity resolving key is
8367 		 * known and thus the connection establishment happens
8368 		 * based on the RPA and not the identity address.
8369 		 */
8370 		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
8371 			hci_dev_unlock(hdev);
8372 			status = MGMT_STATUS_REJECTED;
8373 			goto complete;
8374 		}
8375 
8376 		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
8377 		    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
8378 		    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
8379 		     bacmp(&hdev->static_addr, BDADDR_ANY))) {
8380 			memcpy(addr, &hdev->static_addr, 6);
8381 			addr[6] = 0x01;
8382 		} else {
8383 			memcpy(addr, &hdev->bdaddr, 6);
8384 			addr[6] = 0x00;
8385 		}
8386 
8387 		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
8388 					  addr, sizeof(addr));
8389 
8390 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8391 			role = 0x02;
8392 		else
8393 			role = 0x01;
8394 
8395 		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
8396 					  &role, sizeof(role));
8397 
8398 		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
8399 			eir_len = eir_append_data(rp->eir, eir_len,
8400 						  EIR_LE_SC_CONFIRM,
8401 						  hash, sizeof(hash));
8402 
8403 			eir_len = eir_append_data(rp->eir, eir_len,
8404 						  EIR_LE_SC_RANDOM,
8405 						  rand, sizeof(rand));
8406 		}
8407 
8408 		flags = mgmt_get_adv_discov_flags(hdev);
8409 
8410 		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
8411 			flags |= LE_AD_NO_BREDR;
8412 
8413 		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
8414 					  &flags, sizeof(flags));
8415 		break;
8416 	}
8417 
8418 	hci_dev_unlock(hdev);
8419 
8420 	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
8421 
8422 	status = MGMT_STATUS_SUCCESS;
8423 
8424 complete:
8425 	rp->type = cp->type;
8426 	rp->eir_len = cpu_to_le16(eir_len);
8427 
8428 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
8429 				status, rp, sizeof(*rp) + eir_len);
8430 	if (err < 0 || status)
8431 		goto done;
8432 
8433 	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8434 				 rp, sizeof(*rp) + eir_len,
8435 				 HCI_MGMT_OOB_DATA_EVENTS, sk);
8436 
8437 done:
8438 	kfree(rp);
8439 
8440 	return err;
8441 }
8442 
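/* Return the advertising flags this controller supports, including the
 * extended-advertising specific ones when ext_adv_capable().
 */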
8443 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8444 {
8445 	u32 flags = 0;
8446 
8447 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
8448 	flags |= MGMT_ADV_FLAG_DISCOV;
8449 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8450 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8451 	flags |= MGMT_ADV_FLAG_APPEARANCE;
8452 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8453 	flags |= MGMT_ADV_PARAM_DURATION;
8454 	flags |= MGMT_ADV_PARAM_TIMEOUT;
8455 	flags |= MGMT_ADV_PARAM_INTERVALS;
8456 	flags |= MGMT_ADV_PARAM_TX_POWER;
8457 	flags |= MGMT_ADV_PARAM_SCAN_RSP;
8458 
8459 	/* With extended advertising, the TX_POWER returned from Set Adv
8460 	 * Params will always be valid.
8461 	 */
8462 	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8463 		flags |= MGMT_ADV_FLAG_TX_POWER;
8464 
8465 	if (ext_adv_capable(hdev)) {
8466 		flags |= MGMT_ADV_FLAG_SEC_1M;
8467 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8468 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8469 
8470 		if (le_2m_capable(hdev))
8471 			flags |= MGMT_ADV_FLAG_SEC_2M;
8472 
8473 		if (le_coded_capable(hdev))
8474 			flags |= MGMT_ADV_FLAG_SEC_CODED;
8475 	}
8476 
8477 	return flags;
8478 }
8479 
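/* Read Advertising Features handler: report the supported flags, the
 * advertising/scan response data limits and the currently registered
 * instance identifiers.
 */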
8480 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
8481 			     void *data, u16 data_len)
8482 {
8483 	struct mgmt_rp_read_adv_features *rp;
8484 	size_t rp_len;
8485 	int err;
8486 	struct adv_info *adv_instance;
8487 	u32 supported_flags;
8488 	u8 *instance;
8489 
8490 	bt_dev_dbg(hdev, "sock %p", sk);
8491 
8492 	if (!lmp_le_capable(hdev))
8493 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8494 				       MGMT_STATUS_REJECTED);
8495 
8496 	hci_dev_lock(hdev);
8497 
8498 	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
8499 	rp = kmalloc(rp_len, GFP_ATOMIC);
8500 	if (!rp) {
8501 		hci_dev_unlock(hdev);
8502 		return -ENOMEM;
8503 	}
8504 
8505 	supported_flags = get_supported_adv_flags(hdev);
8506 
8507 	rp->supported_flags = cpu_to_le32(supported_flags);
8508 	rp->max_adv_data_len = max_adv_len(hdev);
8509 	rp->max_scan_rsp_len = max_adv_len(hdev);
8510 	rp->max_instances = hdev->le_num_of_adv_sets;
8511 	rp->num_instances = hdev->adv_instance_cnt;
8512 
8513 	instance = rp->instance;
8514 	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
8515 		/* Only instances 1-le_num_of_adv_sets are externally visible */
8516 		if (adv_instance->instance <= hdev->adv_instance_cnt) {
8517 			*instance = adv_instance->instance;
8518 			instance++;
8519 		} else {
8520 			rp->num_instances--;
8521 			rp_len--;
8522 		}
8523 	}
8524 
8525 	hci_dev_unlock(hdev);
8526 
8527 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8528 				MGMT_STATUS_SUCCESS, rp, rp_len);
8529 
8530 	kfree(rp);
8531 
8532 	return err;
8533 }
8534 
8535 static u8 calculate_name_len(struct hci_dev *hdev)
8536 {
8537 	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
8538 
8539 	return eir_append_local_name(hdev, buf, 0);
8540 }
8541 
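/* Return how many bytes of TLV data fit in the advertising or scan
 * response payload once room is reserved for the fields the kernel
 * manages itself (flags, TX power, local name, appearance).
 */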
8542 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8543 			   bool is_adv_data)
8544 {
8545 	u8 max_len = max_adv_len(hdev);
8546 
8547 	if (is_adv_data) {
8548 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8549 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
8550 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
8551 			max_len -= 3;
8552 
8553 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8554 			max_len -= 3;
8555 	} else {
8556 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8557 			max_len -= calculate_name_len(hdev);
8558 
8559 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8560 			max_len -= 4;
8561 	}
8562 
8563 	return max_len;
8564 }
8565 
8566 static bool flags_managed(u32 adv_flags)
8567 {
8568 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8569 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
8570 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
8571 }
8572 
8573 static bool tx_power_managed(u32 adv_flags)
8574 {
8575 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8576 }
8577 
8578 static bool name_managed(u32 adv_flags)
8579 {
8580 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8581 }
8582 
8583 static bool appearance_managed(u32 adv_flags)
8584 {
8585 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8586 }
8587 
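/* Validate user-supplied TLV data: it must fit in the available space,
 * be well formed, and not contain fields the kernel manages itself.
 */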
8588 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8589 			      u8 len, bool is_adv_data)
8590 {
8591 	int i, cur_len;
8592 	u8 max_len;
8593 
8594 	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8595 
8596 	if (len > max_len)
8597 		return false;
8598 
8599 	/* Make sure that the data is correctly formatted. */
8600 	for (i = 0; i < len; i += (cur_len + 1)) {
8601 		cur_len = data[i];
8602 
8603 		if (!cur_len)
8604 			continue;
8605 
8606 		if (data[i + 1] == EIR_FLAGS &&
8607 		    (!is_adv_data || flags_managed(adv_flags)))
8608 			return false;
8609 
8610 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8611 			return false;
8612 
8613 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8614 			return false;
8615 
8616 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8617 			return false;
8618 
8619 		if (data[i + 1] == EIR_APPEARANCE &&
8620 		    appearance_managed(adv_flags))
8621 			return false;
8622 
8623 		/* If the current field length would exceed the total data
8624 		 * length, then it's invalid.
8625 		 */
8626 		if (i + cur_len >= len)
8627 			return false;
8628 	}
8629 
8630 	return true;
8631 }
8632 
8633 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8634 {
8635 	u32 supported_flags, phy_flags;
8636 
8637 	/* The current implementation only supports a subset of the specified
8638 	 * flags. The sec (PHY) flags must also be mutually exclusive.
8639 	 */
8640 	supported_flags = get_supported_adv_flags(hdev);
8641 	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8642 	if (adv_flags & ~supported_flags ||
8643 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8644 		return false;
8645 
8646 	return true;
8647 }
8648 
8649 static bool adv_busy(struct hci_dev *hdev)
8650 {
8651 	return pending_find(MGMT_OP_SET_LE, hdev);
8652 }
8653 
8654 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8655 			     int err)
8656 {
8657 	struct adv_info *adv, *n;
8658 
8659 	bt_dev_dbg(hdev, "err %d", err);
8660 
8661 	hci_dev_lock(hdev);
8662 
8663 	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8664 		u8 instance;
8665 
8666 		if (!adv->pending)
8667 			continue;
8668 
8669 		if (!err) {
8670 			adv->pending = false;
8671 			continue;
8672 		}
8673 
8674 		instance = adv->instance;
8675 
8676 		if (hdev->cur_adv_instance == instance)
8677 			cancel_adv_timeout(hdev);
8678 
8679 		hci_remove_adv_instance(hdev, instance);
8680 		mgmt_advertising_removed(sk, hdev, instance);
8681 	}
8682 
8683 	hci_dev_unlock(hdev);
8684 }
8685 
8686 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8687 {
8688 	struct mgmt_pending_cmd *cmd = data;
8689 	struct mgmt_cp_add_advertising *cp = cmd->param;
8690 	struct mgmt_rp_add_advertising rp;
8691 
8692 	memset(&rp, 0, sizeof(rp));
8693 
8694 	rp.instance = cp->instance;
8695 
8696 	if (err)
8697 		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
8698 				mgmt_status(err));
8699 	else
8700 		mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
8701 				  mgmt_status(err), &rp, sizeof(rp));
8702 
8703 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8704 
8705 	mgmt_pending_free(cmd);
8706 }
8707 
8708 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8709 {
8710 	struct mgmt_pending_cmd *cmd = data;
8711 	struct mgmt_cp_add_advertising *cp = cmd->param;
8712 
8713 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8714 }
8715 
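/* Add Advertising (MGMT_OP_ADD_ADVERTISING) handler: register or
 * update an advertising instance and, when possible, schedule it for
 * transmission right away.
 */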
8716 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8717 			   void *data, u16 data_len)
8718 {
8719 	struct mgmt_cp_add_advertising *cp = data;
8720 	struct mgmt_rp_add_advertising rp;
8721 	u32 flags;
8722 	u8 status;
8723 	u16 timeout, duration;
8724 	unsigned int prev_instance_cnt;
8725 	u8 schedule_instance = 0;
8726 	struct adv_info *adv, *next_instance;
8727 	int err;
8728 	struct mgmt_pending_cmd *cmd;
8729 
8730 	bt_dev_dbg(hdev, "sock %p", sk);
8731 
8732 	status = mgmt_le_support(hdev);
8733 	if (status)
8734 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8735 				       status);
8736 
8737 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8738 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8739 				       MGMT_STATUS_INVALID_PARAMS);
8740 
8741 	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8742 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8743 				       MGMT_STATUS_INVALID_PARAMS);
8744 
8745 	flags = __le32_to_cpu(cp->flags);
8746 	timeout = __le16_to_cpu(cp->timeout);
8747 	duration = __le16_to_cpu(cp->duration);
8748 
8749 	if (!requested_adv_flags_are_valid(hdev, flags))
8750 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8751 				       MGMT_STATUS_INVALID_PARAMS);
8752 
8753 	hci_dev_lock(hdev);
8754 
8755 	if (timeout && !hdev_is_powered(hdev)) {
8756 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8757 				      MGMT_STATUS_REJECTED);
8758 		goto unlock;
8759 	}
8760 
8761 	if (adv_busy(hdev)) {
8762 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8763 				      MGMT_STATUS_BUSY);
8764 		goto unlock;
8765 	}
8766 
8767 	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8768 	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8769 			       cp->scan_rsp_len, false)) {
8770 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8771 				      MGMT_STATUS_INVALID_PARAMS);
8772 		goto unlock;
8773 	}
8774 
8775 	prev_instance_cnt = hdev->adv_instance_cnt;
8776 
8777 	adv = hci_add_adv_instance(hdev, cp->instance, flags,
8778 				   cp->adv_data_len, cp->data,
8779 				   cp->scan_rsp_len,
8780 				   cp->data + cp->adv_data_len,
8781 				   timeout, duration,
8782 				   HCI_ADV_TX_POWER_NO_PREFERENCE,
8783 				   hdev->le_adv_min_interval,
8784 				   hdev->le_adv_max_interval, 0);
8785 	if (IS_ERR(adv)) {
8786 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8787 				      MGMT_STATUS_FAILED);
8788 		goto unlock;
8789 	}
8790 
8791 	/* Only trigger an advertising added event if a new instance was
8792 	 * actually added.
8793 	 */
8794 	if (hdev->adv_instance_cnt > prev_instance_cnt)
8795 		mgmt_advertising_added(sk, hdev, cp->instance);
8796 
8797 	if (hdev->cur_adv_instance == cp->instance) {
8798 		/* If the currently advertised instance is being changed then
8799 		 * cancel the current advertising and schedule the next
8800 		 * instance. If there is only one instance then the overridden
8801 		 * advertising data will be visible right away.
8802 		 */
8803 		cancel_adv_timeout(hdev);
8804 
8805 		next_instance = hci_get_next_instance(hdev, cp->instance);
8806 		if (next_instance)
8807 			schedule_instance = next_instance->instance;
8808 	} else if (!hdev->adv_instance_timeout) {
8809 		/* Immediately advertise the new instance if no other
8810 		 * instance is currently being advertised.
8811 		 */
8812 		schedule_instance = cp->instance;
8813 	}
8814 
8815 	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
8816 	 * there is no instance to be advertised then we have no HCI
8817 	 * communication to make. Simply return.
8818 	 */
8819 	if (!hdev_is_powered(hdev) ||
8820 	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8821 	    !schedule_instance) {
8822 		rp.instance = cp->instance;
8823 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8824 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8825 		goto unlock;
8826 	}
8827 
8828 	/* We're good to go, update advertising data, parameters, and start
8829 	 * advertising.
8830 	 */
8831 	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
8832 			       data_len);
8833 	if (!cmd) {
8834 		err = -ENOMEM;
8835 		goto unlock;
8836 	}
8837 
8838 	cp->instance = schedule_instance;
8839 
8840 	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
8841 				 add_advertising_complete);
8842 	if (err < 0)
8843 		mgmt_pending_free(cmd);
8844 
8845 unlock:
8846 	hci_dev_unlock(hdev);
8847 
8848 	return err;
8849 }
8850 
8851 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8852 					int err)
8853 {
8854 	struct mgmt_pending_cmd *cmd = data;
8855 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8856 	struct mgmt_rp_add_ext_adv_params rp;
8857 	struct adv_info *adv;
8858 	u32 flags;
8859 
8860 	BT_DBG("%s", hdev->name);
8861 
8862 	hci_dev_lock(hdev);
8863 
8864 	adv = hci_find_adv_instance(hdev, cp->instance);
8865 	if (!adv)
8866 		goto unlock;
8867 
8868 	rp.instance = cp->instance;
8869 	rp.tx_power = adv->tx_power;
8870 
8871 	/* While we're at it, inform userspace of the available space for this
8872 	 * advertisement, given the flags that will be used.
8873 	 */
8874 	flags = __le32_to_cpu(cp->flags);
8875 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8876 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8877 
8878 	if (err) {
8879 		/* If this advertisement was previously advertising and we
8880 		 * failed to update it, we signal that it has been removed and
8881 		 * delete its structure.
8882 		 */
8883 		if (!adv->pending)
8884 			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8885 
8886 		hci_remove_adv_instance(hdev, cp->instance);
8887 
8888 		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
8889 				mgmt_status(err));
8890 	} else {
8891 		mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
8892 				  mgmt_status(err), &rp, sizeof(rp));
8893 	}
8894 
8895 unlock:
8896 	mgmt_pending_free(cmd);
8897 
8898 	hci_dev_unlock(hdev);
8899 }
8900 
8901 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8902 {
8903 	struct mgmt_pending_cmd *cmd = data;
8904 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8905 
8906 	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8907 }
8908 
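/* Add Extended Advertising Parameters handler: the first half of the
 * two-call extended interface. It creates the instance with parameters
 * only; the data follows via Add Extended Advertising Data.
 */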
8909 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8910 			      void *data, u16 data_len)
8911 {
8912 	struct mgmt_cp_add_ext_adv_params *cp = data;
8913 	struct mgmt_rp_add_ext_adv_params rp;
8914 	struct mgmt_pending_cmd *cmd = NULL;
8915 	struct adv_info *adv;
8916 	u32 flags, min_interval, max_interval;
8917 	u16 timeout, duration;
8918 	u8 status;
8919 	s8 tx_power;
8920 	int err;
8921 
8922 	BT_DBG("%s", hdev->name);
8923 
8924 	status = mgmt_le_support(hdev);
8925 	if (status)
8926 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8927 				       status);
8928 
8929 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8930 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8931 				       MGMT_STATUS_INVALID_PARAMS);
8932 
8933 	/* The purpose of breaking add_advertising into two separate MGMT calls
8934 	 * for params and data is to allow more parameters to be added to this
8935 	 * structure in the future. For this reason, we verify that we have the
8936 	 * bare minimum structure we know of when the interface was defined. Any
8937 	 * extra parameters we don't know about will be ignored in this request.
8938 	 */
8939 	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8940 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8941 				       MGMT_STATUS_INVALID_PARAMS);
8942 
8943 	flags = __le32_to_cpu(cp->flags);
8944 
8945 	if (!requested_adv_flags_are_valid(hdev, flags))
8946 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8947 				       MGMT_STATUS_INVALID_PARAMS);
8948 
8949 	hci_dev_lock(hdev);
8950 
8951 	/* In the new interface, we require that we are powered to register */
8952 	if (!hdev_is_powered(hdev)) {
8953 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8954 				      MGMT_STATUS_REJECTED);
8955 		goto unlock;
8956 	}
8957 
8958 	if (adv_busy(hdev)) {
8959 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8960 				      MGMT_STATUS_BUSY);
8961 		goto unlock;
8962 	}
8963 
8964 	/* Parse defined parameters from request, use defaults otherwise */
8965 	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8966 		  __le16_to_cpu(cp->timeout) : 0;
8967 
8968 	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8969 		   __le16_to_cpu(cp->duration) :
8970 		   hdev->def_multi_adv_rotation_duration;
8971 
8972 	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8973 		       __le32_to_cpu(cp->min_interval) :
8974 		       hdev->le_adv_min_interval;
8975 
8976 	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8977 		       __le32_to_cpu(cp->max_interval) :
8978 		       hdev->le_adv_max_interval;
8979 
8980 	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8981 		   cp->tx_power :
8982 		   HCI_ADV_TX_POWER_NO_PREFERENCE;
8983 
8984 	/* Create advertising instance with no advertising or response data */
8985 	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
8986 				   timeout, duration, tx_power, min_interval,
8987 				   max_interval, 0);
8988 
8989 	if (IS_ERR(adv)) {
8990 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8991 				      MGMT_STATUS_FAILED);
8992 		goto unlock;
8993 	}
8994 
8995 	/* Submit request for advertising params if ext adv available */
8996 	if (ext_adv_capable(hdev)) {
8997 		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
8998 				       data, data_len);
8999 		if (!cmd) {
9000 			err = -ENOMEM;
9001 			hci_remove_adv_instance(hdev, cp->instance);
9002 			goto unlock;
9003 		}
9004 
9005 		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
9006 					 add_ext_adv_params_complete);
9007 		if (err < 0)
9008 			mgmt_pending_free(cmd);
9009 	} else {
9010 		rp.instance = cp->instance;
9011 		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
9012 		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9013 		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9014 		err = mgmt_cmd_complete(sk, hdev->id,
9015 					MGMT_OP_ADD_EXT_ADV_PARAMS,
9016 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9017 	}
9018 
9019 unlock:
9020 	hci_dev_unlock(hdev);
9021 
9022 	return err;
9023 }
9024 
9025 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
9026 {
9027 	struct mgmt_pending_cmd *cmd = data;
9028 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
9029 	struct mgmt_rp_add_advertising rp;
9030 
9031 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
9032 
9033 	memset(&rp, 0, sizeof(rp));
9034 
9035 	rp.instance = cp->instance;
9036 
9037 	if (err)
9038 		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
9039 				mgmt_status(err));
9040 	else
9041 		mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
9042 				  mgmt_status(err), &rp, sizeof(rp));
9043 
9044 	mgmt_pending_free(cmd);
9045 }
9046 
9047 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
9048 {
9049 	struct mgmt_pending_cmd *cmd = data;
9050 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
9051 	int err;
9052 
9053 	if (ext_adv_capable(hdev)) {
9054 		err = hci_update_adv_data_sync(hdev, cp->instance);
9055 		if (err)
9056 			return err;
9057 
9058 		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
9059 		if (err)
9060 			return err;
9061 
9062 		return hci_enable_ext_advertising_sync(hdev, cp->instance);
9063 	}
9064 
9065 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
9066 }
9067 
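/* Add Extended Advertising Data handler: the second half of the
 * extended interface, attaching advertising and scan response data to
 * an instance created by Add Extended Advertising Parameters.
 */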
9068 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
9069 			    u16 data_len)
9070 {
9071 	struct mgmt_cp_add_ext_adv_data *cp = data;
9072 	struct mgmt_rp_add_ext_adv_data rp;
9073 	u8 schedule_instance = 0;
9074 	struct adv_info *next_instance;
9075 	struct adv_info *adv_instance;
9076 	int err = 0;
9077 	struct mgmt_pending_cmd *cmd;
9078 
9079 	BT_DBG("%s", hdev->name);
9080 
9081 	hci_dev_lock(hdev);
9082 
9083 	adv_instance = hci_find_adv_instance(hdev, cp->instance);
9084 
9085 	if (!adv_instance) {
9086 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9087 				      MGMT_STATUS_INVALID_PARAMS);
9088 		goto unlock;
9089 	}
9090 
9091 	/* In the new interface, we require that we are powered to register */
9092 	if (!hdev_is_powered(hdev)) {
9093 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9094 				      MGMT_STATUS_REJECTED);
9095 		goto clear_new_instance;
9096 	}
9097 
9098 	if (adv_busy(hdev)) {
9099 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9100 				      MGMT_STATUS_BUSY);
9101 		goto clear_new_instance;
9102 	}
9103 
9104 	/* Validate new data */
9105 	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
9106 			       cp->adv_data_len, true) ||
9107 	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
9108 			       cp->adv_data_len, cp->scan_rsp_len, false)) {
9109 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9110 				      MGMT_STATUS_INVALID_PARAMS);
9111 		goto clear_new_instance;
9112 	}
9113 
9114 	/* Set the data in the advertising instance */
9115 	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
9116 				  cp->data, cp->scan_rsp_len,
9117 				  cp->data + cp->adv_data_len);
9118 
9119 	/* If using software rotation, determine next instance to use */
9120 	if (hdev->cur_adv_instance == cp->instance) {
9121 		/* If the currently advertised instance is being changed
9122 		 * then cancel the current advertising and schedule the
9123 		 * next instance. If there is only one instance then the
9124 		 * overridden advertising data will be visible right
9125 		 * away.
9126 		 */
9127 		cancel_adv_timeout(hdev);
9128 
9129 		next_instance = hci_get_next_instance(hdev, cp->instance);
9130 		if (next_instance)
9131 			schedule_instance = next_instance->instance;
9132 	} else if (!hdev->adv_instance_timeout) {
9133 		/* Immediately advertise the new instance if no other
9134 		 * instance is currently being advertised.
9135 		 */
9136 		schedule_instance = cp->instance;
9137 	}
9138 
9139 	/* If the HCI_ADVERTISING flag is set or there is no instance to
9140 	 * be advertised then we have no HCI communication to make.
9141 	 * Simply return.
9142 	 */
9143 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
9144 		if (adv_instance->pending) {
9145 			mgmt_advertising_added(sk, hdev, cp->instance);
9146 			adv_instance->pending = false;
9147 		}
9148 		rp.instance = cp->instance;
9149 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9150 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9151 		goto unlock;
9152 	}
9153 
9154 	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
9155 			       data_len);
9156 	if (!cmd) {
9157 		err = -ENOMEM;
9158 		goto clear_new_instance;
9159 	}
9160 
9161 	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
9162 				 add_ext_adv_data_complete);
9163 	if (err < 0) {
9164 		mgmt_pending_free(cmd);
9165 		goto clear_new_instance;
9166 	}
9167 
9168 	/* We were successful in updating data, so trigger advertising_added
9169 	 * event if this is an instance that wasn't previously advertising. If
9170 	 * a failure occurs in the requests we initiated, we will remove the
9171 	 * instance again in add_ext_adv_data_complete.
9172 	 */
9173 	if (adv_instance->pending)
9174 		mgmt_advertising_added(sk, hdev, cp->instance);
9175 
9176 	goto unlock;
9177 
9178 clear_new_instance:
9179 	hci_remove_adv_instance(hdev, cp->instance);
9180 
9181 unlock:
9182 	hci_dev_unlock(hdev);
9183 
9184 	return err;
9185 }
9186 
9187 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9188 					int err)
9189 {
9190 	struct mgmt_pending_cmd *cmd = data;
9191 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9192 	struct mgmt_rp_remove_advertising rp;
9193 
9194 	bt_dev_dbg(hdev, "err %d", err);
9195 
9196 	memset(&rp, 0, sizeof(rp));
9197 	rp.instance = cp->instance;
9198 
9199 	if (err)
9200 		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
9201 				mgmt_status(err));
9202 	else
9203 		mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
9204 				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9205 
9206 	mgmt_pending_free(cmd);
9207 }
9208 
9209 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9210 {
9211 	struct mgmt_pending_cmd *cmd = data;
9212 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9213 	int err;
9214 
9215 	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9216 	if (err)
9217 		return err;
9218 
9219 	if (list_empty(&hdev->adv_instances))
9220 		err = hci_disable_advertising_sync(hdev);
9221 
9222 	return err;
9223 }
9224 
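/* Remove Advertising handler: instance 0 requests removal of all
 * instances, otherwise only the specified one is removed.
 */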
9225 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
9226 			      void *data, u16 data_len)
9227 {
9228 	struct mgmt_cp_remove_advertising *cp = data;
9229 	struct mgmt_pending_cmd *cmd;
9230 	int err;
9231 
9232 	bt_dev_dbg(hdev, "sock %p", sk);
9233 
9234 	hci_dev_lock(hdev);
9235 
9236 	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
9237 		err = mgmt_cmd_status(sk, hdev->id,
9238 				      MGMT_OP_REMOVE_ADVERTISING,
9239 				      MGMT_STATUS_INVALID_PARAMS);
9240 		goto unlock;
9241 	}
9242 
9243 	if (pending_find(MGMT_OP_SET_LE, hdev)) {
9244 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9245 				      MGMT_STATUS_BUSY);
9246 		goto unlock;
9247 	}
9248 
9249 	if (list_empty(&hdev->adv_instances)) {
9250 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9251 				      MGMT_STATUS_INVALID_PARAMS);
9252 		goto unlock;
9253 	}
9254 
9255 	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
9256 			       data_len);
9257 	if (!cmd) {
9258 		err = -ENOMEM;
9259 		goto unlock;
9260 	}
9261 
9262 	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
9263 				 remove_advertising_complete);
9264 	if (err < 0)
9265 		mgmt_pending_free(cmd);
9266 
9267 unlock:
9268 	hci_dev_unlock(hdev);
9269 
9270 	return err;
9271 }
9272 
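/* Get Advertising Size Information handler: report the data size
 * limits for the given instance/flags combination without changing any
 * state.
 */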
9273 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9274 			     void *data, u16 data_len)
9275 {
9276 	struct mgmt_cp_get_adv_size_info *cp = data;
9277 	struct mgmt_rp_get_adv_size_info rp;
9278 	u32 flags, supported_flags;
9279 
9280 	bt_dev_dbg(hdev, "sock %p", sk);
9281 
9282 	if (!lmp_le_capable(hdev))
9283 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9284 				       MGMT_STATUS_REJECTED);
9285 
9286 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9287 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9288 				       MGMT_STATUS_INVALID_PARAMS);
9289 
9290 	flags = __le32_to_cpu(cp->flags);
9291 
9292 	/* The current implementation only supports a subset of the specified
9293 	 * flags.
9294 	 */
9295 	supported_flags = get_supported_adv_flags(hdev);
9296 	if (flags & ~supported_flags)
9297 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9298 				       MGMT_STATUS_INVALID_PARAMS);
9299 
9300 	rp.instance = cp->instance;
9301 	rp.flags = cp->flags;
9302 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9303 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9304 
9305 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9306 				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9307 }
9308 
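/* Command handler table, indexed by mgmt opcode: the position of each
 * entry must stay in sync with the MGMT_OP_* numbering.
 */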
9309 static const struct hci_mgmt_handler mgmt_handlers[] = {
9310 	{ NULL }, /* 0x0000 (no command) */
9311 	{ read_version,            MGMT_READ_VERSION_SIZE,
9312 						HCI_MGMT_NO_HDEV |
9313 						HCI_MGMT_UNTRUSTED },
9314 	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
9315 						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,         MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,      MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info, MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,          MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,        MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,     MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor, MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send,               MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
	{ mgmt_hci_cmd_sync,       MGMT_HCI_CMD_SYNC_SIZE, HCI_MGMT_VAR_LEN },
};

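/* Notify mgmt sockets that a new controller index has appeared. Legacy
 * listeners get INDEX_ADDED (or UNCONF_INDEX_ADDED for unconfigured
 * controllers), while EXT_INDEX_ADDED additionally carries the index
 * type and bus.
 */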
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE))
		return;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
				 HCI_MGMT_UNCONF_INDEX_EVENTS);
		ev.type = 0x01;
	} else {
		mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
				 HCI_MGMT_INDEX_EVENTS);
		ev.type = 0x00;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}

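/* Mirror image of mgmt_index_added(): fail all pending commands with
 * INVALID_INDEX, emit the matching *_INDEX_REMOVED events and, if the
 * controller was managed over mgmt, cancel its delayed work items.
 */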
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	struct cmd_lookup match = { NULL, hdev, MGMT_STATUS_INVALID_INDEX };

	if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE))
		return;

	mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match);

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_UNCONF_INDEX_EVENTS);
		ev.type = 0x01;
	} else {
		mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_INDEX_EVENTS);
		ev.type = 0x00;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);

	/* Cancel any remaining timed work */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->service_cache);
	cancel_delayed_work_sync(&hdev->rpa_expired);
}

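/* Called once the power-on sequence has finished; on success the stored
 * LE auto-connect actions are restarted and passive scanning is
 * re-evaluated before the pending SET_POWERED commands are answered.
 */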
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}

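/* Power-off counterpart: answer pending commands, report an all-zero
 * class of device if one was set, and emit the resulting new settings.
 */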
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp,
			     &match);

	/* If the power off is because of hdev unregistration let us use
	 * the appropriate INVALID_INDEX status. Otherwise use NOT_POWERED.
	 * We cover both scenarios here since later in mgmt_index_removed()
	 * any hci_conn callbacks will have already been triggered,
	 * potentially causing misleading DISCONNECTED status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		match.mgmt_status = MGMT_STATUS_INVALID_INDEX;
	else
		match.mgmt_status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
	struct mgmt_pending_cmd *cmd;
	u8 status;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return;

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

	mgmt_pending_remove(cmd);
}

void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}

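/* Map the SMP key type (together with its authenticated flag) onto the
 * long term key type constants exposed to userspace.
 */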
static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
	switch (ltk->type) {
	case SMP_LTK:
	case SMP_LTK_RESPONDER:
		if (ltk->authenticated)
			return MGMT_LTK_AUTHENTICATED;
		return MGMT_LTK_UNAUTHENTICATED;
	case SMP_LTK_P256:
		if (ltk->authenticated)
			return MGMT_LTK_P256_AUTH;
		return MGMT_LTK_P256_UNAUTH;
	case SMP_LTK_P256_DEBUG:
		return MGMT_LTK_P256_DEBUG;
	}

	return MGMT_LTK_UNAUTHENTICATED;
}

void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * their long term keys to be stored. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address is
	 * the long term key stored. If the remote identity is known,
	 * the long term keys are internally mapped to the identity
	 * address. So allow static random and public addresses here.
	 * Static random addresses are identified by the two most
	 * significant address bits both being set.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * their signature resolving keys to be stored. Their addresses
	 * will change the next time around.
	 *
	 * Only when a remote device provides an identity address is
	 * the signature resolving key stored. So allow static random
	 * and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}

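/* Emit NEW_CONN_PARAM so userspace can persist updated connection
 * parameters; non-identity (resolvable/non-resolvable random) addresses
 * are skipped since parameters for them cannot be meaningfully stored.
 */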
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}

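/* Emit DEVICE_CONNECTED exactly once per connection; the EIR payload is
 * either the received LE advertising data or, for BR/EDR, the remote
 * name and class of device.
 */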
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		return;

	/* Allocate a buffer for either the LE advertising data or the
	 * BR/EDR name and class of device.
	 */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class, sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}

static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
}

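/* A power-down is in progress if the flag is already set or if a
 * pending SET_POWERED command is switching the controller off.
 */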
bool mgmt_powering_down(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
		return true;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return false;

	cp = cmd->param;
	if (!cp->val)
		return true;

	return false;
}

void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK &&
	    link_type != LE_LINK  &&
	    link_type != BIS_LINK)
		return;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);
}

void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, true,
			     unpair_device_rsp, hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

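/* If the connection was already reported to userspace, a failure must
 * be delivered as a disconnect; otherwise emit CONNECT_FAILED.
 */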
void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		mgmt_device_disconnected(hdev, &conn->dst, conn->type,
					 conn->dst_type, status, true);
		return;
	}

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

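/* Common completion helper for the four user confirm/passkey reply
 * variants below; it resolves the matching pending command.
 */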
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}

int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}

void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, false, sk_lookup,
			     &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, false, sk_lookup,
			     &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, false, sk_lookup,
			     &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this name change was triggered by an HCI command
		 * issued while powering the device on or off, don't
		 * send any mgmt signals.
		 */
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
			return;

		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}

static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	int i;

	for (i = 0; i < uuid_count; i++) {
		if (!memcmp(uuid, uuids[i], 16))
			return true;
	}

	return false;
}

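/* Walk the EIR/advertising data and check whether any advertised UUID
 * (16, 32 or 128 bit; the short forms are expanded via the Bluetooth
 * base UUID) appears in the given filter list.
 */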
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}

static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If an RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * an RSSI smaller than the RSSI threshold will be dropped. If the
	 * quirk is set, let it through for further processing, as we might
	 * need to restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !hci_test_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in the filter, results with
		 * no matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (hci_test_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER)) {
		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}

void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev;

	ev.monitor_handle = cpu_to_le16(handle);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}

static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to the DEVICE_FOUND event
	 * except that it also carries a 'monitor_handle'. Make a copy of
	 * DEVICE_FOUND and store the monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}

static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitors
	 *
	 * For cases 1 and 2, report all advertisements via
	 * MGMT_EV_DEVICE_FOUND and report ONLY one advertisement per device
	 * for the matched Monitor via MGMT_EV_ADV_MONITOR_DEVICE_FOUND.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}

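/* Mesh variant of device found reporting: the advertisement is only
 * forwarded if one of its AD types matches the receiver's configured
 * mesh_ad_types filter (an empty filter accepts everything).
 */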
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}

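/* Central advertisement/inquiry result dispatcher: feeds mesh, applies
 * discovery and service discovery filters, and hands the event to the
 * advertisement monitor logic for final delivery.
 */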
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for discoveries that were not initiated by
	 * the kernel. For LE the one exception is pend_le_reports > 0,
	 * in which case we're doing passive scanning and want these
	 * events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);

			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}

void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (name)
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
	else
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}

void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	bt_dev_dbg(hdev, "discovering %u", discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev;

	ev.suspend_state = state;
	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	ev.wake_reason = reason;
	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	} else {
		memset(&ev.addr, 0, sizeof(ev.addr));
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}

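/* Registration glue for the HCI control channel; the mgmt_handlers
 * table above is indexed by mgmt opcode.
 */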
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};

int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}

void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}

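/* Called when a mgmt socket is closed; walks all controllers and
 * completes any outstanding mesh transmissions owned by that socket.
 */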
void mgmt_cleanup(struct sock *sk)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		do {
			mesh_tx = mgmt_mesh_next(hdev, sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, true);
		} while (mesh_tx);
	}

	read_unlock(&hci_dev_list_lock);
}