xref: /linux/net/bluetooth/mgmt.c (revision 0dd88eaa71264a25f950fbf9052ed87bf716fb7c)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <linux/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "smp.h"
37 #include "mgmt_util.h"
38 #include "mgmt_config.h"
39 #include "msft.h"
40 #include "eir.h"
41 #include "aosp.h"
42 
43 #define MGMT_VERSION	1
44 #define MGMT_REVISION	23
45 
/* Full set of management commands available to trusted (privileged)
 * control sockets. Untrusted sockets are restricted to the read-only
 * subset in mgmt_untrusted_commands below.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
	MGMT_OP_HCI_CMD_SYNC,
};
137 
/* Full set of management events delivered to trusted (privileged)
 * control sockets. Untrusted sockets only receive the subset in
 * mgmt_untrusted_events below.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};
184 
/* Read-only commands permitted on untrusted control sockets; all of
 * these expose information without changing controller state.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
197 
/* Events delivered to untrusted control sockets; limited to index and
 * configuration change notifications that carry no sensitive data.
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
212 
213 #define CACHE_TIMEOUT	secs_to_jiffies(2)
214 
215 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
216 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
217 
218 /* HCI to MGMT error code conversion table */
/* Indexed by HCI error code (see Bluetooth Core spec, Vol 1 Part F);
 * each entry is the corresponding MGMT status returned to userspace.
 * Codes beyond the end of the table map to MGMT_STATUS_FAILED in
 * mgmt_status().
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
285 
286 static u8 mgmt_errno_status(int err)
287 {
288 	switch (err) {
289 	case 0:
290 		return MGMT_STATUS_SUCCESS;
291 	case -EPERM:
292 		return MGMT_STATUS_REJECTED;
293 	case -EINVAL:
294 		return MGMT_STATUS_INVALID_PARAMS;
295 	case -EOPNOTSUPP:
296 		return MGMT_STATUS_NOT_SUPPORTED;
297 	case -EBUSY:
298 		return MGMT_STATUS_BUSY;
299 	case -ETIMEDOUT:
300 		return MGMT_STATUS_AUTH_FAILED;
301 	case -ENOMEM:
302 		return MGMT_STATUS_NO_RESOURCES;
303 	case -EISCONN:
304 		return MGMT_STATUS_ALREADY_CONNECTED;
305 	case -ENOTCONN:
306 		return MGMT_STATUS_DISCONNECTED;
307 	}
308 
309 	return MGMT_STATUS_FAILED;
310 }
311 
312 static u8 mgmt_status(int err)
313 {
314 	if (err < 0)
315 		return mgmt_errno_status(err);
316 
317 	if (err < ARRAY_SIZE(mgmt_status_table))
318 		return mgmt_status_table[err];
319 
320 	return MGMT_STATUS_FAILED;
321 }
322 
/* Send an index-related event on the control channel, filtered by the
 * given socket flag (e.g. HCI_MGMT_INDEX_EVENTS); no socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
329 
/* Send an event on the control channel limited to sockets with the
 * given flag set, optionally skipping the originating socket.
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
336 
/* Send an event on the control channel to all trusted sockets,
 * optionally skipping the originating socket.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
343 
/* Send a pre-built event skb on the control channel to all trusted
 * sockets; ownership of the skb is passed to mgmt_send_event_skb().
 */
static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}
349 
350 static u8 le_addr_type(u8 mgmt_addr_type)
351 {
352 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
353 		return ADDR_LE_DEV_PUBLIC;
354 	else
355 		return ADDR_LE_DEV_RANDOM;
356 }
357 
/* Fill a mgmt_rp_read_version structure with the interface version and
 * revision; exported for use by hci_sock as well.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
365 
/* MGMT_OP_READ_VERSION handler: reply with the management interface
 * version/revision. Not tied to any controller (MGMT_INDEX_NONE).
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
378 
379 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
380 			 u16 data_len)
381 {
382 	struct mgmt_rp_read_commands *rp;
383 	u16 num_commands, num_events;
384 	size_t rp_size;
385 	int i, err;
386 
387 	bt_dev_dbg(hdev, "sock %p", sk);
388 
389 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
390 		num_commands = ARRAY_SIZE(mgmt_commands);
391 		num_events = ARRAY_SIZE(mgmt_events);
392 	} else {
393 		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
394 		num_events = ARRAY_SIZE(mgmt_untrusted_events);
395 	}
396 
397 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
398 
399 	rp = kmalloc(rp_size, GFP_KERNEL);
400 	if (!rp)
401 		return -ENOMEM;
402 
403 	rp->num_commands = cpu_to_le16(num_commands);
404 	rp->num_events = cpu_to_le16(num_events);
405 
406 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
407 		__le16 *opcode = rp->opcodes;
408 
409 		for (i = 0; i < num_commands; i++, opcode++)
410 			put_unaligned_le16(mgmt_commands[i], opcode);
411 
412 		for (i = 0; i < num_events; i++, opcode++)
413 			put_unaligned_le16(mgmt_events[i], opcode);
414 	} else {
415 		__le16 *opcode = rp->opcodes;
416 
417 		for (i = 0; i < num_commands; i++, opcode++)
418 			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
419 
420 		for (i = 0; i < num_events; i++, opcode++)
421 			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
422 	}
423 
424 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
425 				rp, rp_size);
426 	kfree(rp);
427 
428 	return err;
429 }
430 
/* MGMT_OP_READ_INDEX_LIST handler: reply with the indexes of all
 * configured controllers. Controllers still in setup/config, bound to
 * a user channel, raw-only, or unconfigured are excluded.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper-bound the number of entries so the reply
	 * buffer can be sized before filling it.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC because hci_dev_list_lock is held */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes, applying the full set of
	 * exclusion rules. The final count may be smaller than the
	 * first-pass estimate.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the final count */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
488 
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: like read_index_list() but
 * reports only controllers still flagged HCI_UNCONFIGURED.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper-bound the entry count for buffer sizing */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC because hci_dev_list_lock is held */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the unconfigured controller indexes */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the final count */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
546 
/* MGMT_OP_READ_EXT_INDEX_LIST handler: reply with both configured and
 * unconfigured controllers, each entry carrying a type (0x00 configured,
 * 0x01 unconfigured) and bus. Also switches this socket over to
 * extended index events only.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count all devices for buffer sizing; filtering
	 * happens in the fill pass below.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list)
		count++;

	/* GFP_ATOMIC because hci_dev_list_lock is held */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			rp->entry[count].type = 0x01;
		else
			rp->entry[count].type = 0x00;

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
612 
613 static bool is_configured(struct hci_dev *hdev)
614 {
615 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
616 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
617 		return false;
618 
619 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
620 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
621 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
622 		return false;
623 
624 	return true;
625 }
626 
627 static __le32 get_missing_options(struct hci_dev *hdev)
628 {
629 	u32 options = 0;
630 
631 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
632 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
633 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
634 
635 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
636 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
637 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
638 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
639 
640 	return cpu_to_le32(options);
641 }
642 
/* Broadcast the current set of missing configuration options to all
 * sockets that opted in to option events, skipping the originator.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
650 
/* Complete a configuration command by replying with the currently
 * missing options for this controller.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
658 
/* MGMT_OP_READ_CONFIG_INFO handler: reply with the manufacturer, the
 * configuration options this controller supports and the ones that are
 * still missing.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Changing the public address is only supported when the driver
	 * provides a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
686 
/* Build the bitmask of PHYs this controller supports, derived from the
 * BR/EDR LMP feature bits and the LE feature page.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate 1-slot is mandatory for BR/EDR */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		/* EDR 3M support implies 2M support, hence the nesting */
		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M is mandatory for any LE controller */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
738 
/* Build the bitmask of currently selected PHYs. For BR/EDR the EDR
 * packet-type bits in hdev->pkt_type are "do not use" flags, so a
 * cleared HCI_2DHx/HCI_3DHx bit means the PHY is selected. For LE the
 * default TX/RX PHY preferences are reported directly.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate 1-slot is always in use */
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			/* Note: EDR bits are inverted ("shall not be used") */
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
801 
802 static u32 get_configurable_phys(struct hci_dev *hdev)
803 {
804 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
805 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
806 }
807 
808 static u32 get_supported_settings(struct hci_dev *hdev)
809 {
810 	u32 settings = 0;
811 
812 	settings |= MGMT_SETTING_POWERED;
813 	settings |= MGMT_SETTING_BONDABLE;
814 	settings |= MGMT_SETTING_DEBUG_KEYS;
815 	settings |= MGMT_SETTING_CONNECTABLE;
816 	settings |= MGMT_SETTING_DISCOVERABLE;
817 
818 	if (lmp_bredr_capable(hdev)) {
819 		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
820 			settings |= MGMT_SETTING_FAST_CONNECTABLE;
821 		settings |= MGMT_SETTING_BREDR;
822 		settings |= MGMT_SETTING_LINK_SECURITY;
823 
824 		if (lmp_ssp_capable(hdev)) {
825 			settings |= MGMT_SETTING_SSP;
826 		}
827 
828 		if (lmp_sc_capable(hdev))
829 			settings |= MGMT_SETTING_SECURE_CONN;
830 
831 		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
832 			     &hdev->quirks))
833 			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
834 	}
835 
836 	if (lmp_le_capable(hdev)) {
837 		settings |= MGMT_SETTING_LE;
838 		settings |= MGMT_SETTING_SECURE_CONN;
839 		settings |= MGMT_SETTING_PRIVACY;
840 		settings |= MGMT_SETTING_STATIC_ADDRESS;
841 		settings |= MGMT_SETTING_ADVERTISING;
842 	}
843 
844 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
845 	    hdev->set_bdaddr)
846 		settings |= MGMT_SETTING_CONFIGURATION;
847 
848 	if (cis_central_capable(hdev))
849 		settings |= MGMT_SETTING_CIS_CENTRAL;
850 
851 	if (cis_peripheral_capable(hdev))
852 		settings |= MGMT_SETTING_CIS_PERIPHERAL;
853 
854 	if (ll_privacy_capable(hdev))
855 		settings |= MGMT_SETTING_LL_PRIVACY;
856 
857 	settings |= MGMT_SETTING_PHY_CONFIGURATION;
858 
859 	return settings;
860 }
861 
/* Build the bitmask of settings that are currently active on this
 * controller, derived from its hdev flags and capabilities.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_capable(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_capable(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	if (ll_privacy_capable(hdev))
		settings |= MGMT_SETTING_LL_PRIVACY;

	return settings;
}
944 
/* Look up a pending mgmt command for this controller on the control
 * channel; returns NULL if none is pending for the opcode.
 */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
949 
950 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
951 {
952 	struct mgmt_pending_cmd *cmd;
953 
954 	/* If there's a pending mgmt command the flags will not yet have
955 	 * their final values, so check for this first.
956 	 */
957 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
958 	if (cmd) {
959 		struct mgmt_mode *cp = cmd->param;
960 		if (cp->val == 0x01)
961 			return LE_AD_GENERAL;
962 		else if (cp->val == 0x02)
963 			return LE_AD_LIMITED;
964 	} else {
965 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
966 			return LE_AD_LIMITED;
967 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
968 			return LE_AD_GENERAL;
969 	}
970 
971 	return 0;
972 }
973 
/* Return whether the controller should be treated as connectable,
 * preferring the value from a pending SET_CONNECTABLE command over the
 * current flag state.
 */
bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * it's final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}
990 
/* hci_cmd_sync callback: push the current EIR data and class of device
 * to the controller after the service cache timer fired.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
998 
/* Delayed work: when the service cache expires, queue a sync update of
 * EIR and class of device. Does nothing if the cache flag was already
 * cleared.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}
1009 
1010 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1011 {
1012 	/* The generation of a new RPA and programming it into the
1013 	 * controller happens in the hci_req_enable_advertising()
1014 	 * function.
1015 	 */
1016 	if (ext_adv_capable(hdev))
1017 		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1018 	else
1019 		return hci_enable_advertising_sync(hdev);
1020 }
1021 
/* Delayed work: mark the resolvable private address as expired and, if
 * advertising is active, queue a sync re-enable so a new RPA gets
 * programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
1036 
1037 static int set_discoverable_sync(struct hci_dev *hdev, void *data);
1038 
/* Delayed work handler for hdev->discov_off: the discoverable timeout
 * fired. Clear the discoverable flags under hdev->lock, queue the HCI
 * update and broadcast the new settings to mgmt listeners.
 */
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}
1063 
1064 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1065 
1066 static void mesh_send_complete(struct hci_dev *hdev,
1067 			       struct mgmt_mesh_tx *mesh_tx, bool silent)
1068 {
1069 	u8 handle = mesh_tx->handle;
1070 
1071 	if (!silent)
1072 		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
1073 			   sizeof(handle), NULL);
1074 
1075 	mgmt_mesh_remove(mesh_tx);
1076 }
1077 
/* hci_cmd_sync callback: the mesh send window is over. Clear the
 * sending state, stop advertising, and complete (with event) the mesh
 * packet at the head of the queue, if there is one.
 */
static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}
1091 
1092 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1093 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
/* Completion callback for mesh_send_done_sync(): kick off transmission
 * of the next queued mesh packet, if any. The incoming @err is ignored
 * and reused to hold the queueing result.
 */
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	/* On queueing failure complete the packet immediately (with
	 * event) instead of leaving it stuck in the queue.
	 */
	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}
1109 
/* Delayed work handler for hdev->mesh_send_done: end the current mesh
 * transmission window by queueing mesh_send_done_sync(), with
 * mesh_next() chained to start the next packet.
 */
static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}
1120 
/* One-time per-device mgmt initialization, performed when the first
 * mgmt command arrives for @hdev. Idempotent via the HCI_MGMT flag.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}
1142 
/* MGMT_OP_READ_INFO handler: reply with the controller's address,
 * version, manufacturer, settings masks, class of device and names.
 * The snapshot is taken under hdev->lock; the reply is sent after
 * dropping it.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1172 
/* Build the EIR payload for the extended-info reply/event into @eir:
 * class of device (BR/EDR only), appearance (LE only), complete and
 * short local names. Returns the number of bytes written. The caller
 * must provide a buffer large enough for all four fields.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1196 
/* MGMT_OP_READ_EXT_INFO handler: like read_controller_info() but with
 * class/appearance/names packed as EIR data after the fixed header.
 * NOTE(review): the 512-byte buffer is assumed large enough for the
 * fixed reply plus the bounded EIR fields appended below.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1236 
/* Emit MGMT_EV_EXT_INFO_CHANGED (current EIR snapshot) to all sockets
 * that opted into extended info events, except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1252 
/* Complete a settings-changing command on @sk with the current
 * settings bitmask as the (little-endian) reply payload.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1260 
1261 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1262 {
1263 	struct mgmt_ev_advertising_added ev;
1264 
1265 	ev.instance = instance;
1266 
1267 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1268 }
1269 
1270 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1271 			      u8 instance)
1272 {
1273 	struct mgmt_ev_advertising_removed ev;
1274 
1275 	ev.instance = instance;
1276 
1277 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1278 }
1279 
/* Cancel a pending advertising-instance expiry, if one is armed, and
 * reset the recorded timeout so it won't be re-armed from stale state.
 */
static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}
1287 
/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	/* Re-sort every known LE connection parameter entry back onto
	 * the pending-connection or pending-report list according to
	 * its auto_connect policy (used after power on).
	 */
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}
1312 
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings bitmask to
 * all sockets subscribed to setting events, except @skip.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1320 
/* Completion callback for set_powered_sync(): reply to the originating
 * socket and, on successful power on, restore LE auto-connect actions
 * and broadcast the new settings.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}
1357 
/* hci_cmd_sync callback for MGMT_OP_SET_POWERED: apply the requested
 * power state. Bails out if the command was cancelled in the meantime.
 */
static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return -ECANCELED;

	cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}
1373 
/* MGMT_OP_SET_POWERED handler: validate the request, reject if a power
 * transition is already in flight, short-circuit when already in the
 * requested state, otherwise queue the power change asynchronously.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powering off while a power-down is already in progress is busy. */
	if (!cp->val) {
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
					      MGMT_STATUS_BUSY);
			goto failed;
		}
	}

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just confirm current settings. */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1432 
/* Broadcast the current settings to every subscribed mgmt socket. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1437 
/* Shared context for the mgmt_pending_foreach() callbacks below. */
struct cmd_lookup {
	struct sock *sk;	/* first matched socket (ref held via sock_hold) */
	struct hci_dev *hdev;
	u8 mgmt_status;		/* status to report in cmd_complete_rsp() */
};
1443 
/* mgmt_pending_foreach() callback: answer @cmd with the current
 * settings, remember the first socket seen (taking a reference so the
 * caller can use it after the commands are freed), and free the entry.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1459 
/* mgmt_pending_foreach() callback: fail @cmd with the status pointed
 * to by @data and drop the pending entry.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1467 
/* mgmt_pending_foreach() callback: complete @cmd via its own
 * cmd_complete handler when it has one, otherwise fall back to a
 * plain status response. @data is a struct cmd_lookup.
 */
static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	/* dequeue cmd_sync entries using cmd as data as that is about to be
	 * removed/freed.
	 */
	hci_cmd_sync_dequeue(match->hdev, NULL, cmd, NULL);

	if (cmd->cmd_complete) {
		cmd->cmd_complete(cmd, match->mgmt_status);
		mgmt_pending_remove(cmd);

		return;
	}

	/* cmd_status_rsp() reads only the status field of cmd_lookup. */
	cmd_status_rsp(cmd, data);
}
1486 
/* Generic cmd_complete handler: echo the command's own parameters
 * back as the reply payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1492 
/* cmd_complete handler for commands whose parameters start with a
 * struct mgmt_addr_info: reply with just that address portion.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1498 
1499 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1500 {
1501 	if (!lmp_bredr_capable(hdev))
1502 		return MGMT_STATUS_NOT_SUPPORTED;
1503 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1504 		return MGMT_STATUS_REJECTED;
1505 	else
1506 		return MGMT_STATUS_SUCCESS;
1507 }
1508 
1509 static u8 mgmt_le_support(struct hci_dev *hdev)
1510 {
1511 	if (!lmp_le_capable(hdev))
1512 		return MGMT_STATUS_NOT_SUPPORTED;
1513 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1514 		return MGMT_STATUS_REJECTED;
1515 	else
1516 		return MGMT_STATUS_SUCCESS;
1517 }
1518 
/* Completion callback for set_discoverable_sync(): on success, arm the
 * discoverable timeout (if configured) and reply/broadcast the new
 * settings; on failure, report the error and roll back the limited
 * discoverable flag.
 */
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	/* Arm the discoverable-off timer now that the mode took effect. */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = secs_to_jiffies(hdev->discov_timeout);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}
1553 
/* hci_cmd_sync callback for MGMT_OP_SET_DISCOVERABLE: push the
 * discoverable state (already recorded in the hdev flags) to the
 * controller.
 */
static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}
1560 
/* MGMT_OP_SET_DISCOVERABLE handler. cp->val is 0x00 (off), 0x01
 * (general) or 0x02 (limited, which requires a timeout). Validates the
 * request, handles the powered-off and no-mode-change fast paths, then
 * updates the flags and queues the HCI update; the timeout itself is
 * armed in mgmt_set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off. */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable implies connectable. */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = secs_to_jiffies(hdev->discov_timeout);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1693 
/* Completion callback for set_connectable_sync(): reply to the
 * originating socket and broadcast the new settings on success, or
 * report the mapped error status on failure.
 */
static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
					  int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}
1722 
/* Flags-only path for MGMT_OP_SET_CONNECTABLE (used when the device is
 * powered off): update HCI_CONNECTABLE (disabling it also clears
 * HCI_DISCOVERABLE), reply to @sk, and on an actual change refresh
 * scanning state and broadcast the new settings.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1751 
/* hci_cmd_sync callback for MGMT_OP_SET_CONNECTABLE: push the
 * connectable state (already recorded in the hdev flags) to the
 * controller.
 */
static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}
1758 
/* MGMT_OP_SET_CONNECTABLE handler: validate the request, take the
 * flags-only path while powered off, otherwise update the flags
 * (disabling also clears the discoverable state and its timer) and
 * queue the HCI update.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Turning connectable off also ends discoverable mode. */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1818 
/* MGMT_OP_SET_BONDABLE handler: toggle HCI_BONDABLE. Purely a flag
 * change; on an actual change the discoverable state is refreshed
 * (the advertising address may depend on bondable mode) and the new
 * settings are broadcast.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1856 
/* MGMT_OP_SET_LINK_SECURITY handler: requires BR/EDR. While powered
 * off only the HCI_LINK_SECURITY flag is toggled; while powered, the
 * controller's authentication-enable setting is changed via
 * HCI_OP_WRITE_AUTH_ENABLE with a pending command tracking the reply.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already matches the requested state: just confirm. */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1925 
/* Completion callback for set_ssp_sync(): on failure, roll back the
 * HCI_SSP_ENABLED flag (broadcasting the rollback) and fail all
 * pending SET_SSP commands; on success, answer them with the current
 * settings, broadcast on change, and refresh the EIR data.
 */
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED || cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* Enabling failed: undo the optimistic flag set done in
		 * set_ssp_sync() and let listeners know.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}
1967 
/* hci_cmd_sync callback for MGMT_OP_SET_SSP: set the flag optimistically
 * before writing the mode to the controller, and undo the flag change
 * if the write fails.
 */
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	/* Roll back only if we were the ones who set the flag. */
	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
1985 
/* MGMT_OP_SET_SSP handler: requires BR/EDR and SSP-capable hardware.
 * While powered off only the HCI_SSP_ENABLED flag is toggled; while
 * powered, the change is queued through set_ssp_sync() with
 * set_ssp_complete() handling the outcome.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just confirm current settings. */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2060 
2061 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2062 {
2063 	bt_dev_dbg(hdev, "sock %p", sk);
2064 
2065 	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2066 				       MGMT_STATUS_NOT_SUPPORTED);
2067 }
2068 
/* Completion callback for set_le_sync(): fail all pending SET_LE
 * commands on error, otherwise answer them with the current settings
 * and broadcast the change.
 */
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
							&status);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
2089 
/* hci_cmd_sync callback for MGMT_OP_SET_LE: when disabling, tear down
 * all advertising state first; when enabling, set the flag, write the
 * LE host-supported setting and refresh advertising data and passive
 * scanning.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		/* Disabling LE: remove all advertising instances and stop
		 * any active advertising before flipping the host setting.
		 */
		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
2133 
2134 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2135 {
2136 	struct mgmt_pending_cmd *cmd = data;
2137 	u8 status = mgmt_status(err);
2138 	struct sock *sk = cmd->sk;
2139 
2140 	if (status) {
2141 		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
2142 				     cmd_status_rsp, &status);
2143 		return;
2144 	}
2145 
2146 	mgmt_pending_remove(cmd);
2147 	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
2148 }
2149 
/* hci_sync work for MGMT_OP_SET_MESH_RECEIVER: toggle the HCI_MESH
 * flag, install the caller-supplied AD-type filter list and refresh
 * passive scanning. Always returns 0.
 */
static int set_mesh_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_mesh *cp = cmd->param;
	size_t len = cmd->param_len;

	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));

	if (cp->enable)
		hci_dev_set_flag(hdev, HCI_MESH);
	else
		hci_dev_clear_flag(hdev, HCI_MESH);

	/* Remaining bytes after the fixed header are the AD-type filter */
	len -= sizeof(*cp);

	/* If filters don't fit, forward all adv pkts */
	if (len <= sizeof(hdev->mesh_ad_types))
		memcpy(hdev->mesh_ad_types, cp->ad_types, len);

	hci_update_passive_scan_sync(hdev);
	return 0;
}
2172 
/* MGMT_OP_SET_MESH_RECEIVER handler: validate the request and queue
 * set_mesh_sync() on the hci_sync machinery. Requires an LE-capable
 * controller with the mesh experimental feature enabled.
 */
static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_set_mesh *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* enable is a strict boolean */
	if (cp->enable != 0x00 && cp->enable != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
					 set_mesh_complete);

	if (err < 0) {
		/* Report failure now; set_mesh_complete will not run */
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2210 
/* Completion callback for mesh_send_sync(): on failure, clear the
 * sending flag and report the failed transmission; on success, arm the
 * mesh_send_done work to end the transmission after the advertising
 * duration (25 ms per requested advertisement).
 */
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	unsigned long mesh_send_interval;
	u8 mgmt_err = mgmt_status(err);

	/* Report any errors here, but don't report completion */

	if (mgmt_err) {
		hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
		/* Send Complete Error Code for handle */
		mesh_send_complete(hdev, mesh_tx, false);
		return;
	}

	mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
	queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
			   mesh_send_interval);
}
2231 
/* hci_sync work for MGMT_OP_MESH_SEND: create a dedicated advertising
 * instance (one past the controller's configured set count) carrying
 * the mesh packet and schedule it if nothing else is advertising.
 *
 * Returns MGMT_STATUS_BUSY when all advertising sets are in use,
 * otherwise 0 or a negative error.
 */
static int mesh_send_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	struct adv_info *adv, *next_instance;
	u8 instance = hdev->le_num_of_adv_sets + 1;
	u16 timeout, duration;
	int err = 0;

	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
		return MGMT_STATUS_BUSY;

	timeout = 1000;
	/* Advertise long enough for "cnt" events at the max interval */
	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
	adv = hci_add_adv_instance(hdev, instance, 0,
				   send->adv_data_len, send->adv_data,
				   0, NULL,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval,
				   mesh_tx->handle);

	if (!IS_ERR(adv))
		mesh_tx->instance = instance;
	else
		err = PTR_ERR(adv);

	if (hdev->cur_adv_instance == instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, instance);
		if (next_instance)
			instance = next_instance->instance;
		else
			instance = 0;
	} else if (hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other, or
		 * let it go naturally from queue if ADV is already happening
		 */
		instance = 0;
	}

	/* instance == 0 means nothing needs explicit scheduling here */
	if (instance)
		return hci_schedule_adv_instance_sync(hdev, instance, true);

	return err;
}
2285 
2286 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2287 {
2288 	struct mgmt_rp_mesh_read_features *rp = data;
2289 
2290 	if (rp->used_handles >= rp->max_handles)
2291 		return;
2292 
2293 	rp->handles[rp->used_handles++] = mesh_tx->handle;
2294 }
2295 
/* MGMT_OP_MESH_READ_FEATURES handler: report the mesh handle capacity
 * and the handles of this socket's outstanding transmissions. The
 * reply is truncated to only the used handle slots.
 */
static int mesh_features(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_rp_mesh_read_features rp;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	memset(&rp, 0, sizeof(rp));
	rp.index = cpu_to_le16(hdev->id);
	/* Handles are only available while LE is enabled */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		rp.max_handles = MESH_HANDLES_MAX;

	hci_dev_lock(hdev);

	if (rp.max_handles)
		mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	/* Trim the reply to the handle slots actually filled in */
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);

	hci_dev_unlock(hdev);
	return 0;
}
2322 
/* hci_sync work for MGMT_OP_MESH_SEND_CANCEL: abort either all of the
 * requesting socket's mesh transmissions (handle 0) or the single
 * matching one, then complete and release the pending command.
 */
static int send_cancel(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
	struct mgmt_mesh_tx *mesh_tx;

	if (!cancel->handle) {
		/* Handle 0 cancels every TX owned by this socket */
		do {
			mesh_tx = mgmt_mesh_next(hdev, cmd->sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, false);
		} while (mesh_tx);
	} else {
		mesh_tx = mgmt_mesh_find(hdev, cancel->handle);

		/* Only the owning socket may cancel a transmission */
		if (mesh_tx && mesh_tx->sk == cmd->sk)
			mesh_send_complete(hdev, mesh_tx, false);
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
			  0, NULL, 0);
	mgmt_pending_free(cmd);

	return 0;
}
2349 
/* MGMT_OP_MESH_SEND_CANCEL handler: validate capability/state and
 * queue send_cancel() on the hci_sync machinery.
 */
static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);
	/* mgmt_pending_new: not tracked in mgmt_pending, freed by the
	 * send_cancel work itself on success
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2383 
/* MGMT_OP_MESH_SEND handler: queue a mesh packet for transmission via
 * a temporary advertising instance. Replies immediately with the
 * assigned TX handle; completion is reported asynchronously.
 */
static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct mgmt_cp_mesh_send *send = data;
	struct mgmt_rp_mesh_read_features rp;
	bool sending;
	int err = 0;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_NOT_SUPPORTED);
	/* Adv payload must be non-empty and at most 31 bytes */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
	    len <= MGMT_MESH_SEND_SIZE ||
	    len > (MGMT_MESH_SEND_SIZE + 31))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* Count this socket's outstanding transmissions to enforce the
	 * per-socket handle limit
	 */
	memset(&rp, 0, sizeof(rp));
	rp.max_handles = MESH_HANDLES_MAX;

	mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	if (rp.max_handles <= rp.used_handles) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_BUSY);
		goto done;
	}

	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
	mesh_tx = mgmt_mesh_add(sk, hdev, send, len);

	if (!mesh_tx)
		err = -ENOMEM;
	else if (!sending)
		/* Nothing in flight: start transmitting right away;
		 * otherwise the queued TX is picked up later
		 */
		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
					 mesh_send_start_complete);

	if (err < 0) {
		bt_dev_err(hdev, "Send Mesh Failed %d", err);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_FAILED);

		if (mesh_tx) {
			if (sending)
				mgmt_mesh_remove(mesh_tx);
		}
	} else {
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);

		/* Immediate reply carries the 1-byte TX handle */
		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
				  &mesh_tx->handle, 1);
	}

done:
	hci_dev_unlock(hdev);
	return err;
}
2444 
/* MGMT_OP_SET_LE handler: enable or disable LE support. When powered
 * off, or when the requested state already matches the host LE state,
 * only the flags are updated; otherwise set_le_sync() is queued to
 * reconfigure the controller.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* No controller interaction needed: just update the flags */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Disabling LE implies disabling advertising */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Serialize against in-flight commands that touch LE state */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2533 
/* hci_sync work for MGMT_OP_HCI_CMD_SYNC: issue the caller-specified
 * raw HCI command, wait for the requested event (or the default
 * command completion) and forward the returned parameters to
 * userspace. The pending command is always freed here.
 */
static int send_hci_cmd_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_hci_cmd_sync *cp = cmd->param;
	struct sk_buff *skb;

	/* timeout of 0 selects the stack-default HCI command timeout */
	skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cp->opcode),
				le16_to_cpu(cp->params_len), cp->params,
				cp->event, cp->timeout ?
				secs_to_jiffies(cp->timeout) :
				HCI_CMD_TIMEOUT);
	if (IS_ERR(skb)) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
				mgmt_status(PTR_ERR(skb)));
		goto done;
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC, 0,
			  skb->data, skb->len);

	kfree_skb(skb);

done:
	mgmt_pending_free(cmd);

	return 0;
}
2561 
/* MGMT_OP_HCI_CMD_SYNC handler: validate that the variable-length
 * parameter block matches the declared params_len, then queue
 * send_hci_cmd_sync() to execute the raw HCI command.
 */
static int mgmt_hci_cmd_sync(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 len)
{
	struct mgmt_cp_hci_cmd_sync *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	if (len != (offsetof(struct mgmt_cp_hci_cmd_sync, params) +
		    le16_to_cpu(cp->params_len)))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);
	/* mgmt_pending_new: untracked; freed by send_hci_cmd_sync() */
	cmd = mgmt_pending_new(sk, MGMT_OP_HCI_CMD_SYNC, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, send_hci_cmd_sync, cmd, NULL);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2592 
2593 /* This is a helper function to test for pending mgmt commands that can
2594  * cause CoD or EIR HCI commands. We can only allow one such pending
2595  * mgmt command at a time since otherwise we cannot easily track what
2596  * the current values are, will be, and based on that calculate if a new
2597  * HCI command needs to be sent and if yes with what value.
2598  */
2599 static bool pending_eir_or_class(struct hci_dev *hdev)
2600 {
2601 	struct mgmt_pending_cmd *cmd;
2602 
2603 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2604 		switch (cmd->opcode) {
2605 		case MGMT_OP_ADD_UUID:
2606 		case MGMT_OP_REMOVE_UUID:
2607 		case MGMT_OP_SET_DEV_CLASS:
2608 		case MGMT_OP_SET_POWERED:
2609 			return true;
2610 		}
2611 	}
2612 
2613 	return false;
2614 }
2615 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; shortened 16/32-bit UUIDs share these low
 * 12 bytes and differ only in bytes 12-15.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2620 
2621 static u8 get_uuid_size(const u8 *uuid)
2622 {
2623 	u32 val;
2624 
2625 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2626 		return 128;
2627 
2628 	val = get_unaligned_le32(&uuid[12]);
2629 	if (val > 0xffff)
2630 		return 32;
2631 
2632 	return 16;
2633 }
2634 
/* Shared completion for UUID/device-class commands: reply with the
 * (possibly updated) 3-byte Class of Device, then release the pending
 * command.
 */
static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(err), hdev->dev_class, 3);

	mgmt_pending_free(cmd);
}
2646 
/* hci_sync work after a UUID was added: push the updated Class of
 * Device, then the updated EIR data, to the controller. Returns the
 * first error encountered.
 */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2657 
/* MGMT_OP_ADD_UUID handler: record the UUID in hdev->uuids and queue
 * add_uuid_sync() to refresh the Class of Device and EIR data. The
 * reply (carrying the device class) is sent by mgmt_class_complete().
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-touching command may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* MGMT_OP_ADD_UUID don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2707 
2708 static bool enable_service_cache(struct hci_dev *hdev)
2709 {
2710 	if (!hdev_is_powered(hdev))
2711 		return false;
2712 
2713 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2714 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2715 				   CACHE_TIMEOUT);
2716 		return true;
2717 	}
2718 
2719 	return false;
2720 }
2721 
/* hci_sync work after a UUID was removed: push the updated Class of
 * Device, then the updated EIR data, to the controller. Returns the
 * first error encountered.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2732 
/* MGMT_OP_REMOVE_UUID handler: remove one UUID (or all, when the
 * all-zero wildcard is given) from hdev->uuids and queue
 * remove_uuid_sync() to refresh the Class of Device and EIR data.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID acts as a "remove everything" wildcard */
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-touching command may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* When the service cache takes over, the controller
		 * update is deferred to the cache timeout
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_REMOVE_UUID don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2803 
2804 static int set_class_sync(struct hci_dev *hdev, void *data)
2805 {
2806 	int err = 0;
2807 
2808 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2809 		cancel_delayed_work_sync(&hdev->service_cache);
2810 		err = hci_update_eir_sync(hdev);
2811 	}
2812 
2813 	if (err)
2814 		return err;
2815 
2816 	return hci_update_class_sync(hdev);
2817 }
2818 
/* MGMT_OP_SET_DEV_CLASS handler: store the major/minor class and, if
 * the adapter is powered, queue set_class_sync() to write it to the
 * controller. The reply carries the resulting 3-byte device class.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-touching command may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Reserved bits: low 2 of minor, high 3 of major must be zero */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Powered off: values take effect on power on */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_SET_DEV_CLASS don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2873 
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the adapter's stored BR/EDR
 * link keys with the supplied list. Entries that are blocked, have a
 * non-BR/EDR address type, an invalid key type, or are debug
 * combination keys are skipped with a warning.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound that keeps struct_size() within a u16 length */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared key count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		if (key->addr.type != BDADDR_BREDR) {
			bt_dev_warn(hdev,
				    "Invalid link address type %u for %pMR",
				    key->addr.type, &key->addr.bdaddr);
			continue;
		}

		if (key->type > 0x08) {
			bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
				    key->type, &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2966 
2967 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2968 			   u8 addr_type, struct sock *skip_sk)
2969 {
2970 	struct mgmt_ev_device_unpaired ev;
2971 
2972 	bacpy(&ev.addr.bdaddr, bdaddr);
2973 	ev.addr.type = addr_type;
2974 
2975 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2976 			  skip_sk);
2977 }
2978 
/* Completion callback for unpair_device_sync(): on success, broadcast
 * the Device Unpaired event (skipping the requester, who gets the
 * command reply instead), then finish and release the pending command.
 */
static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	if (!err)
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	/* addr_cmd_complete, set by unpair_device() */
	cmd->cmd_complete(cmd, err);
	mgmt_pending_free(cmd);
}
2990 
2991 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2992 {
2993 	struct mgmt_pending_cmd *cmd = data;
2994 	struct mgmt_cp_unpair_device *cp = cmd->param;
2995 	struct hci_conn *conn;
2996 
2997 	if (cp->addr.type == BDADDR_BREDR)
2998 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2999 					       &cp->addr.bdaddr);
3000 	else
3001 		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3002 					       le_addr_type(cp->addr.type));
3003 
3004 	if (!conn)
3005 		return 0;
3006 
3007 	/* Disregard any possible error since the likes of hci_abort_conn_sync
3008 	 * will clean up the connection no matter the error.
3009 	 */
3010 	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3011 
3012 	return 0;
3013 }
3014 
/* MGMT_OP_UNPAIR_DEVICE handler: delete the stored keys for a device
 * (link key for BR/EDR; SMP LTK/IRK for LE) and, when requested and
 * the device is connected, terminate the link via
 * unpair_device_sync(). The reply echoes the address.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	/* disconnect is a strict boolean */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			/* No stored link key means the device was not paired */
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* Not connected: the connection parameters can go now */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
				 unpair_device_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3143 
/* Completion callback for disconnect_sync(): report the translated
 * status via the command's cmd_complete handler and release the
 * pending command.
 */
static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* generic_cmd_complete, set by disconnect() */
	cmd->cmd_complete(cmd, mgmt_status(err));
	mgmt_pending_free(cmd);
}
3151 
3152 static int disconnect_sync(struct hci_dev *hdev, void *data)
3153 {
3154 	struct mgmt_pending_cmd *cmd = data;
3155 	struct mgmt_cp_disconnect *cp = cmd->param;
3156 	struct hci_conn *conn;
3157 
3158 	if (cp->addr.type == BDADDR_BREDR)
3159 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3160 					       &cp->addr.bdaddr);
3161 	else
3162 		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3163 					       le_addr_type(cp->addr.type));
3164 
3165 	if (!conn)
3166 		return -ENOTCONN;
3167 
3168 	/* Disregard any possible error since the likes of hci_abort_conn_sync
3169 	 * will clean up the connection no matter the error.
3170 	 */
3171 	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3172 
3173 	return 0;
3174 }
3175 
/* MGMT_OP_DISCONNECT handler: queue disconnect_sync() to terminate the
 * link to the given address. The reply (echoing the address) is sent
 * from disconnect_complete().
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* mgmt_pending_new: untracked; freed by disconnect_complete() */
	cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
				 disconnect_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3221 
3222 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3223 {
3224 	switch (link_type) {
3225 	case CIS_LINK:
3226 	case BIS_LINK:
3227 	case LE_LINK:
3228 		switch (addr_type) {
3229 		case ADDR_LE_DEV_PUBLIC:
3230 			return BDADDR_LE_PUBLIC;
3231 
3232 		default:
3233 			/* Fallback to LE Random address type */
3234 			return BDADDR_LE_RANDOM;
3235 		}
3236 
3237 	default:
3238 		/* Fallback to BR/EDR type */
3239 		return BDADDR_BREDR;
3240 	}
3241 }
3242 
3243 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3244 			   u16 data_len)
3245 {
3246 	struct mgmt_rp_get_connections *rp;
3247 	struct hci_conn *c;
3248 	int err;
3249 	u16 i;
3250 
3251 	bt_dev_dbg(hdev, "sock %p", sk);
3252 
3253 	hci_dev_lock(hdev);
3254 
3255 	if (!hdev_is_powered(hdev)) {
3256 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3257 				      MGMT_STATUS_NOT_POWERED);
3258 		goto unlock;
3259 	}
3260 
3261 	i = 0;
3262 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
3263 		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3264 			i++;
3265 	}
3266 
3267 	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3268 	if (!rp) {
3269 		err = -ENOMEM;
3270 		goto unlock;
3271 	}
3272 
3273 	i = 0;
3274 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
3275 		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3276 			continue;
3277 		bacpy(&rp->addr[i].bdaddr, &c->dst);
3278 		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3279 		if (c->type == SCO_LINK || c->type == ESCO_LINK)
3280 			continue;
3281 		i++;
3282 	}
3283 
3284 	rp->conn_count = cpu_to_le16(i);
3285 
3286 	/* Recalculate length in case of filtered SCO connections, etc */
3287 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3288 				struct_size(rp, addr, i));
3289 
3290 	kfree(rp);
3291 
3292 unlock:
3293 	hci_dev_unlock(hdev);
3294 	return err;
3295 }
3296 
3297 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3298 				   struct mgmt_cp_pin_code_neg_reply *cp)
3299 {
3300 	struct mgmt_pending_cmd *cmd;
3301 	int err;
3302 
3303 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3304 			       sizeof(*cp));
3305 	if (!cmd)
3306 		return -ENOMEM;
3307 
3308 	cmd->cmd_complete = addr_cmd_complete;
3309 
3310 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3311 			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3312 	if (err < 0)
3313 		mgmt_pending_remove(cmd);
3314 
3315 	return err;
3316 }
3317 
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN code to
 * the controller for an existing BR/EDR connection.
 *
 * If the connection requires a 16-byte PIN (high security) and the
 * supplied one is shorter, a negative reply is sent to the controller
 * instead and the caller gets INVALID_PARAMS.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* PIN pairing only applies to BR/EDR (ACL) connections */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		/* Reject towards the controller; only report INVALID_PARAMS
		 * to the caller if the negative reply itself was queued.
		 */
		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3379 
3380 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3381 			     u16 len)
3382 {
3383 	struct mgmt_cp_set_io_capability *cp = data;
3384 
3385 	bt_dev_dbg(hdev, "sock %p", sk);
3386 
3387 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3388 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3389 				       MGMT_STATUS_INVALID_PARAMS);
3390 
3391 	hci_dev_lock(hdev);
3392 
3393 	hdev->io_capability = cp->io_capability;
3394 
3395 	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3396 
3397 	hci_dev_unlock(hdev);
3398 
3399 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3400 				 NULL, 0);
3401 }
3402 
3403 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3404 {
3405 	struct hci_dev *hdev = conn->hdev;
3406 	struct mgmt_pending_cmd *cmd;
3407 
3408 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3409 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3410 			continue;
3411 
3412 		if (cmd->user_data != conn)
3413 			continue;
3414 
3415 		return cmd;
3416 	}
3417 
3418 	return NULL;
3419 }
3420 
/* Finish a pending MGMT_OP_PAIR_DEVICE command: send the reply, detach
 * the pairing callbacks from the connection and release the references
 * the pending command was holding (drop + put, matching pair_device()).
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}
3449 
3450 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3451 {
3452 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3453 	struct mgmt_pending_cmd *cmd;
3454 
3455 	cmd = find_pairing(conn);
3456 	if (cmd) {
3457 		cmd->cmd_complete(cmd, status);
3458 		mgmt_pending_remove(cmd);
3459 	}
3460 }
3461 
3462 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3463 {
3464 	struct mgmt_pending_cmd *cmd;
3465 
3466 	BT_DBG("status %u", status);
3467 
3468 	cmd = find_pairing(conn);
3469 	if (!cmd) {
3470 		BT_DBG("Unable to find a pending command");
3471 		return;
3472 	}
3473 
3474 	cmd->cmd_complete(cmd, mgmt_status(status));
3475 	mgmt_pending_remove(cmd);
3476 }
3477 
3478 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3479 {
3480 	struct mgmt_pending_cmd *cmd;
3481 
3482 	BT_DBG("status %u", status);
3483 
3484 	if (!status)
3485 		return;
3486 
3487 	cmd = find_pairing(conn);
3488 	if (!cmd) {
3489 		BT_DBG("Unable to find a pending command");
3490 		return;
3491 	}
3492 
3493 	cmd->cmd_complete(cmd, mgmt_status(status));
3494 	mgmt_pending_remove(cmd);
3495 }
3496 
/* MGMT_OP_PAIR_DEVICE handler: initiate pairing with a remote device.
 *
 * For BR/EDR an ACL connection is created directly; for LE the device
 * is added to the connection parameters and connected via passive scan.
 * A pending command tracks the operation; completion is driven by the
 * connection callbacks installed below (or immediately, if the link is
 * already up and secure).
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The reply always echoes the requested address, even on error */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE,
				       HCI_ACL_CONN_TIMEOUT);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the peripheral preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
		if (!p) {
			err = -EIO;
			goto unlock;
		}

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	if (IS_ERR(conn)) {
		int status;

		/* Map connect errno to the closest mgmt status code */
		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* An existing connect_cfm_cb means some other pairing/connect
	 * operation already owns this connection.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* The pending command holds a reference; pairing_complete() puts it */
	cmd->user_data = hci_conn_get(conn);

	/* Link already up and secure enough: complete immediately */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3632 
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort an in-progress Pair Device
 * operation, remove any keys created so far and, if the link was
 * created for the pairing, terminate it as well.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The cancel must target the same device the pairing was for */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* NOTE(review): pairing_complete() drops the reference the pending
	 * command held on conn; conn is still dereferenced below —
	 * presumably kept alive under hci_dev_lock. Confirm.
	 */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3689 
/* Common helper for the user confirmation/passkey/PIN reply commands.
 *
 * @mgmt_op: management opcode being answered (used for replies/pending)
 * @hci_op:  HCI opcode to forward the response with (BR/EDR path)
 * @passkey: only used when hci_op is HCI_OP_USER_PASSKEY_REPLY
 *
 * LE responses are handled entirely by SMP and answered immediately;
 * BR/EDR responses are forwarded to the controller via a pending
 * command completed by addr_cmd_complete.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses go to SMP, not to the controller */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3760 
3761 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3762 			      void *data, u16 len)
3763 {
3764 	struct mgmt_cp_pin_code_neg_reply *cp = data;
3765 
3766 	bt_dev_dbg(hdev, "sock %p", sk);
3767 
3768 	return user_pairing_resp(sk, hdev, &cp->addr,
3769 				MGMT_OP_PIN_CODE_NEG_REPLY,
3770 				HCI_OP_PIN_CODE_NEG_REPLY, 0);
3771 }
3772 
3773 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3774 			      u16 len)
3775 {
3776 	struct mgmt_cp_user_confirm_reply *cp = data;
3777 
3778 	bt_dev_dbg(hdev, "sock %p", sk);
3779 
3780 	if (len != sizeof(*cp))
3781 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3782 				       MGMT_STATUS_INVALID_PARAMS);
3783 
3784 	return user_pairing_resp(sk, hdev, &cp->addr,
3785 				 MGMT_OP_USER_CONFIRM_REPLY,
3786 				 HCI_OP_USER_CONFIRM_REPLY, 0);
3787 }
3788 
3789 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3790 				  void *data, u16 len)
3791 {
3792 	struct mgmt_cp_user_confirm_neg_reply *cp = data;
3793 
3794 	bt_dev_dbg(hdev, "sock %p", sk);
3795 
3796 	return user_pairing_resp(sk, hdev, &cp->addr,
3797 				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3798 				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3799 }
3800 
3801 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3802 			      u16 len)
3803 {
3804 	struct mgmt_cp_user_passkey_reply *cp = data;
3805 
3806 	bt_dev_dbg(hdev, "sock %p", sk);
3807 
3808 	return user_pairing_resp(sk, hdev, &cp->addr,
3809 				 MGMT_OP_USER_PASSKEY_REPLY,
3810 				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3811 }
3812 
3813 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3814 				  void *data, u16 len)
3815 {
3816 	struct mgmt_cp_user_passkey_neg_reply *cp = data;
3817 
3818 	bt_dev_dbg(hdev, "sock %p", sk);
3819 
3820 	return user_pairing_resp(sk, hdev, &cp->addr,
3821 				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3822 				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3823 }
3824 
3825 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3826 {
3827 	struct adv_info *adv_instance;
3828 
3829 	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3830 	if (!adv_instance)
3831 		return 0;
3832 
3833 	/* stop if current instance doesn't need to be changed */
3834 	if (!(adv_instance->flags & flags))
3835 		return 0;
3836 
3837 	cancel_adv_timeout(hdev);
3838 
3839 	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3840 	if (!adv_instance)
3841 		return 0;
3842 
3843 	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3844 
3845 	return 0;
3846 }
3847 
/* hci_cmd_sync worker: expire advertising instances that embed the
 * local name after it changed.
 */
static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}
3852 
/* Completion callback for set_name_sync(): reply to the pending
 * MGMT_OP_SET_LOCAL_NAME command and, on success while advertising,
 * refresh any advertising instance that carries the local name.
 */
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Bail out if the operation was cancelled or this cmd is no longer
	 * the pending one (it may already have been handled elsewhere).
	 */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}
3878 
/* hci_cmd_sync worker for MGMT_OP_SET_LOCAL_NAME: push the new name to
 * the controller for BR/EDR (name + EIR) and refresh the LE scan
 * response data when advertising.
 */
static int set_name_sync(struct hci_dev *hdev, void *data)
{
	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}
3894 
/* MGMT_OP_SET_LOCAL_NAME handler: update the local device name.
 *
 * When the controller is powered off the names are only stored and the
 * change is broadcast directly; otherwise set_name_sync() is queued to
 * push the update to the controller.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* The short name is host-side only; store it immediately */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	/* Only commit the new name once the sync request is queued */
	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3957 
/* hci_cmd_sync worker: expire advertising instances that embed the
 * appearance value after it changed.
 */
static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}
3962 
3963 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3964 			  u16 len)
3965 {
3966 	struct mgmt_cp_set_appearance *cp = data;
3967 	u16 appearance;
3968 	int err;
3969 
3970 	bt_dev_dbg(hdev, "sock %p", sk);
3971 
3972 	if (!lmp_le_capable(hdev))
3973 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3974 				       MGMT_STATUS_NOT_SUPPORTED);
3975 
3976 	appearance = le16_to_cpu(cp->appearance);
3977 
3978 	hci_dev_lock(hdev);
3979 
3980 	if (hdev->appearance != appearance) {
3981 		hdev->appearance = appearance;
3982 
3983 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3984 			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3985 					   NULL);
3986 
3987 		ext_info_changed(hdev, sk);
3988 	}
3989 
3990 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3991 				0);
3992 
3993 	hci_dev_unlock(hdev);
3994 
3995 	return err;
3996 }
3997 
3998 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3999 				 void *data, u16 len)
4000 {
4001 	struct mgmt_rp_get_phy_configuration rp;
4002 
4003 	bt_dev_dbg(hdev, "sock %p", sk);
4004 
4005 	hci_dev_lock(hdev);
4006 
4007 	memset(&rp, 0, sizeof(rp));
4008 
4009 	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
4010 	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
4011 	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
4012 
4013 	hci_dev_unlock(hdev);
4014 
4015 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
4016 				 &rp, sizeof(rp));
4017 }
4018 
4019 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
4020 {
4021 	struct mgmt_ev_phy_configuration_changed ev;
4022 
4023 	memset(&ev, 0, sizeof(ev));
4024 
4025 	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
4026 
4027 	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
4028 			  sizeof(ev), skip);
4029 }
4030 
/* Completion callback for set_default_phy_sync(): derive the final
 * status from both the sync-queue error and the controller's response
 * skb, reply to the pending command and broadcast the change on
 * success.
 */
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Bail out if cancelled or this cmd is no longer the pending one */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	/* The sync call succeeded; extract the status from the response skb
	 * (which may itself be missing or an error pointer).
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}
4068 
4069 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4070 {
4071 	struct mgmt_pending_cmd *cmd = data;
4072 	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4073 	struct hci_cp_le_set_default_phy cp_phy;
4074 	u32 selected_phys = __le32_to_cpu(cp->selected_phys);
4075 
4076 	memset(&cp_phy, 0, sizeof(cp_phy));
4077 
4078 	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4079 		cp_phy.all_phys |= 0x01;
4080 
4081 	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4082 		cp_phy.all_phys |= 0x02;
4083 
4084 	if (selected_phys & MGMT_PHY_LE_1M_TX)
4085 		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4086 
4087 	if (selected_phys & MGMT_PHY_LE_2M_TX)
4088 		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4089 
4090 	if (selected_phys & MGMT_PHY_LE_CODED_TX)
4091 		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4092 
4093 	if (selected_phys & MGMT_PHY_LE_1M_RX)
4094 		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4095 
4096 	if (selected_phys & MGMT_PHY_LE_2M_RX)
4097 		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4098 
4099 	if (selected_phys & MGMT_PHY_LE_CODED_RX)
4100 		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4101 
4102 	cmd->skb =  __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4103 				   sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4104 
4105 	return 0;
4106 }
4107 
/* MGMT_OP_SET_PHY_CONFIGURATION handler: apply a new PHY selection.
 *
 * BR/EDR PHY bits are translated into the ACL packet-type mask and
 * applied immediately; a change in the LE PHY bits is forwarded to the
 * controller via set_default_phy_sync().
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Reject selections outside what the controller supports */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	/* Non-configurable PHYs must all remain selected */
	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Translate the BR/EDR PHY bits into the ACL packet-type mask.
	 * Note that EDR bits are inverted: setting HCI_2DH1 etc. in
	 * pkt_type DISABLES that packet type.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If only BR/EDR bits changed, there is nothing to send to the
	 * controller here; report the change and complete directly.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4236 
/* MGMT_OP_SET_BLOCKED_KEYS handler: replace the list of blocked keys.
 *
 * Note: here "err" carries a MGMT_STATUS_* value (not a negative
 * errno) and is passed as the status of the command-complete reply.
 */
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	/* Largest key_count whose total parameter size still fits in u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* The new list fully replaces the old one */
	hci_blocked_keys_clear(hdev);

	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		/* On allocation failure the keys added so far are kept */
		if (!b) {
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				err, NULL, 0);
}
4285 
/* MGMT_OP_SET_WIDEBAND_SPEECH handler.
 *
 * Toggles the HCI_WIDEBAND_SPEECH_ENABLED device flag. Only permitted
 * when the driver declares HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, and
 * rejected while the controller is powered if the request would change
 * the current value.
 *
 * Returns 0 or a negative error from the mgmt reply helpers.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The driver must explicitly declare wideband speech support */
	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Only boolean on/off is supported */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Flipping the setting while the controller is powered is
	 * rejected; re-asserting the current value is still allowed.
	 */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	/* Broadcast New Settings only when the flag actually flipped */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4334 
/* MGMT_OP_READ_CONTROLLER_CAP handler.
 *
 * Builds an EIR-style TLV list of security capabilities in a small
 * stack buffer and replies with it:
 *  - MGMT_CAP_SEC_FLAGS (always)
 *  - MGMT_CAP_MAX_ENC_KEY_SIZE (only if Read Simple Pairing Options
 *    is supported)
 *  - MGMT_CAP_SMP_MAX_ENC_KEY_SIZE (always)
 *  - MGMT_CAP_LE_TX_PWR (only if the LE tx power range was read)
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	char buf[20];	/* fits the rp header plus all TLVs appended below */
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		/* Single-byte copies: only the low byte of each power
		 * value is reported.
		 */
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	/* Reply length is the fixed header plus the TLVs actually added */
	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
4401 
/* UUIDs identifying the experimental features exposed through
 * MGMT_OP_READ_EXP_FEATURES_INFO / MGMT_OP_SET_EXP_FEATURE.
 *
 * The byte arrays are stored in little-endian order, i.e. reversed
 * relative to the string form quoted above each one.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};

/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
static const u8 mgmt_mesh_uuid[16] = {
	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
};
4439 
/* MGMT_OP_READ_EXP_FEATURES_INFO handler (valid with or without a
 * controller index).
 *
 * Builds the list of experimental features applicable to @hdev (or to
 * the global, non-controller index when @hdev is NULL). Each entry is
 * a 16-byte UUID plus a 32-bit flags word where BIT(0) means
 * "enabled". Reading this also arms MGMT_EV_EXP_FEATURE_CHANGED
 * delivery on this socket.
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_exp_features_info *rp;
	size_t len;
	u16 idx = 0;
	u32 flags;
	int status;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Enough space for 7 features */
	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
	rp = kzalloc(len, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* Debug feature is only reported for the non-controller index */
#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	/* Quality report is listed if either AOSP or a driver hook
	 * provides it.
	 */
	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (IS_ENABLED(CONFIG_BT_LE)) {
		flags = iso_enabled() ? BIT(0) : 0;
		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && lmp_le_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	/* 20 bytes per entry: 16-byte UUID + 4-byte flags */
	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				   MGMT_OP_READ_EXP_FEATURES_INFO,
				   0, rp, sizeof(*rp) + (20 * idx));

	kfree(rp);
	return status;
}
4533 
4534 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4535 			       bool enabled, struct sock *skip)
4536 {
4537 	struct mgmt_ev_exp_feature_changed ev;
4538 
4539 	memset(&ev, 0, sizeof(ev));
4540 	memcpy(ev.uuid, uuid, 16);
4541 	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4542 
4543 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4544 				  &ev, sizeof(ev),
4545 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4546 }
4547 
/* Initializer for an exp_features[] table entry: pairs a feature UUID
 * with its MGMT_OP_SET_EXP_FEATURE handler.
 */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
4553 
/* The zero key uuid is special. Multiple exp features are set through it.
 * Setting it disables every experimental feature at once; at present
 * only the debug feature (under CONFIG_BT_FEATURE_DEBUG, non-controller
 * index) is handled here. Always replies success with an all-zero UUID
 * and zero flags.
 */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		/* Only notify if debug was actually on before */
		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
4580 
#ifdef CONFIG_BT_FEATURE_DEBUG
/* MGMT_OP_SET_EXP_FEATURE handler for the debug feature.
 *
 * Toggles the global Bluetooth debug setting (bt_dbg_set()). Only valid
 * on the non-controller index, with a single boolean parameter octet.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* hdev is NULL here, so the change event is broadcast globally */
	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
#endif
4627 
/* MGMT_OP_SET_EXP_FEATURE handler for the mesh experimental feature.
 *
 * Toggles HCI_MESH_EXPERIMENTAL on @hdev. Disabling it also clears the
 * HCI_MESH flag so mesh operation stops along with the experiment.
 * Requires a controller index and a single boolean parameter octet.
 */
static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_MESH_EXPERIMENTAL);
	} else {
		/* Turning the experiment off also turns mesh itself off */
		hci_dev_clear_flag(hdev, HCI_MESH);
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_MESH_EXPERIMENTAL);
	}

	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);

	return err;
}
4678 
/* MGMT_OP_SET_EXP_FEATURE handler for the quality report feature.
 *
 * Enables/disables quality reporting either through a driver-provided
 * set_quality_report() hook or the AOSP vendor extension. The
 * HCI_QUALITY_REPORT flag is only updated once the backend call
 * succeeded. Runs under hci_req_sync_lock() because the backends issue
 * HCI commands.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		/* Driver hook takes precedence over the AOSP extension */
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		/* Only reflect the new state after the backend succeeded */
		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4752 
/* MGMT_OP_SET_EXP_FEATURE handler for the offload codecs feature.
 *
 * Toggles HCI_OFFLOAD_CODECS_ENABLED. Only supported when the driver
 * provides get_data_path_id(). Requires a controller index and a
 * single boolean parameter octet.
 */
static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
				  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));

	/* Offload is only available when the driver exposes data paths */
	if (!hdev->get_data_path_id) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	}

	bt_dev_info(hdev, "offload codecs enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);

	return err;
}
4810 
/* MGMT_OP_SET_EXP_FEATURE handler for LE simultaneous roles.
 *
 * Toggles HCI_LE_SIMULTANEOUS_ROLES. Only supported when the
 * controller can be in the relevant LE states at the same time
 * (hci_dev_le_state_simultaneous()). Requires a controller index and a
 * single boolean parameter octet.
 */
static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
					  struct mgmt_cp_set_exp_feature *cp,
					  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));

	if (!hci_dev_le_state_simultaneous(hdev)) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
		else
			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
	}

	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);

	return err;
}
4868 
#ifdef CONFIG_BT_LE
/* MGMT_OP_SET_EXP_FEATURE handler for the ISO socket feature.
 *
 * Registers (iso_init()) or unregisters (iso_exit()) the ISO socket
 * protocol globally. Only valid on the non-controller index, with a
 * single boolean parameter octet.
 *
 * NOTE(review): a failure from iso_init()/iso_exit() only suppresses
 * the feature-changed event; the command itself still replies with
 * success — confirm this is intentional.
 */
static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed = false;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = cp->param[0] ? true : false;
	if (val)
		err = iso_init();
	else
		err = iso_exit();

	if (!err)
		changed = true;

	memcpy(rp.uuid, iso_socket_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* hdev is NULL here, so the change event is broadcast globally */
	if (changed)
		exp_feature_changed(hdev, iso_socket_uuid, val, sk);

	return err;
}
#endif
4919 
/* Dispatch table mapping experimental-feature UUIDs to their
 * MGMT_OP_SET_EXP_FEATURE handlers; terminated by a NULL entry.
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
#ifdef CONFIG_BT_LE
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
#endif

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
4940 
4941 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4942 			   void *data, u16 data_len)
4943 {
4944 	struct mgmt_cp_set_exp_feature *cp = data;
4945 	size_t i = 0;
4946 
4947 	bt_dev_dbg(hdev, "sock %p", sk);
4948 
4949 	for (i = 0; exp_features[i].uuid; i++) {
4950 		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4951 			return exp_features[i].set_func(sk, hdev, cp, data_len);
4952 	}
4953 
4954 	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4955 			       MGMT_OP_SET_EXP_FEATURE,
4956 			       MGMT_STATUS_NOT_SUPPORTED);
4957 }
4958 
4959 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4960 			    u16 data_len)
4961 {
4962 	struct mgmt_cp_get_device_flags *cp = data;
4963 	struct mgmt_rp_get_device_flags rp;
4964 	struct bdaddr_list_with_flags *br_params;
4965 	struct hci_conn_params *params;
4966 	u32 supported_flags;
4967 	u32 current_flags = 0;
4968 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4969 
4970 	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4971 		   &cp->addr.bdaddr, cp->addr.type);
4972 
4973 	hci_dev_lock(hdev);
4974 
4975 	supported_flags = hdev->conn_flags;
4976 
4977 	memset(&rp, 0, sizeof(rp));
4978 
4979 	if (cp->addr.type == BDADDR_BREDR) {
4980 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4981 							      &cp->addr.bdaddr,
4982 							      cp->addr.type);
4983 		if (!br_params)
4984 			goto done;
4985 
4986 		current_flags = br_params->flags;
4987 	} else {
4988 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4989 						le_addr_type(cp->addr.type));
4990 		if (!params)
4991 			goto done;
4992 
4993 		current_flags = params->flags;
4994 	}
4995 
4996 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4997 	rp.addr.type = cp->addr.type;
4998 	rp.supported_flags = cpu_to_le32(supported_flags);
4999 	rp.current_flags = cpu_to_le32(current_flags);
5000 
5001 	status = MGMT_STATUS_SUCCESS;
5002 
5003 done:
5004 	hci_dev_unlock(hdev);
5005 
5006 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5007 				&rp, sizeof(rp));
5008 }
5009 
5010 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5011 				 bdaddr_t *bdaddr, u8 bdaddr_type,
5012 				 u32 supported_flags, u32 current_flags)
5013 {
5014 	struct mgmt_ev_device_flags_changed ev;
5015 
5016 	bacpy(&ev.addr.bdaddr, bdaddr);
5017 	ev.addr.type = bdaddr_type;
5018 	ev.supported_flags = cpu_to_le32(supported_flags);
5019 	ev.current_flags = cpu_to_le32(current_flags);
5020 
5021 	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5022 }
5023 
/* MGMT_OP_SET_DEVICE_FLAGS handler.
 *
 * Validates the requested flags against hdev->conn_flags and stores
 * them on the matching BR/EDR accept-list entry or LE
 * connection-parameter entry. On success a DEVICE_FLAGS_CHANGED event
 * is sent to all other sockets.
 */
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags;
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	/* NOTE(review): hdev->conn_flags is sampled here without
	 * hci_dev_lock(); the LE path re-checks it under the lock below,
	 * but the BR/EDR path relies on this unlocked snapshot — confirm
	 * whether the lock should be taken earlier.
	 */
	supported_flags = hdev->conn_flags;

	/* Reject any flag bit the controller does not support */
	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto done;
	}

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}

		goto unlock;
	}

	/* LE path: look up the connection parameters for the address */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					le_addr_type(cp->addr.type));
	if (!params) {
		bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
			    &cp->addr.bdaddr, le_addr_type(cp->addr.type));
		goto unlock;
	}

	/* Re-validate under the lock with a fresh conn_flags snapshot */
	supported_flags = hdev->conn_flags;

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto unlock;
	}

	WRITE_ONCE(params->flags, current_flags);
	status = MGMT_STATUS_SUCCESS;

	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
	 * has been set.
	 */
	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
		hci_update_passive_scan(hdev);

unlock:
	hci_dev_unlock(hdev);

done:
	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}
5100 
5101 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5102 				   u16 handle)
5103 {
5104 	struct mgmt_ev_adv_monitor_added ev;
5105 
5106 	ev.monitor_handle = cpu_to_le16(handle);
5107 
5108 	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5109 }
5110 
/* Broadcast MGMT_EV_ADV_MONITOR_REMOVED for @handle.
 *
 * If a Remove Adv Monitor command is still pending and it named a
 * specific monitor (non-zero monitor_handle), the issuing socket is
 * skipped so it learns the outcome only from its command reply.
 */
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk_skip = NULL;
	struct mgmt_cp_remove_adv_monitor *cp;

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cmd) {
		cp = cmd->param;

		/* A zero handle presumably means remove-all; then the
		 * issuer gets the event too.
		 */
		if (cp->monitor_handle)
			sk_skip = cmd->sk;
	}

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
}
5130 
/* MGMT_OP_READ_ADV_MONITOR_FEATURES handler.
 *
 * Reports the supported/enabled monitor features plus the list of
 * currently registered monitor handles. Handles are snapshotted under
 * hci_dev_lock() before the reply buffer is allocated.
 *
 * NOTE(review): handles[] is filled without a bound check; this relies
 * on the adv_monitors_idr never holding more than
 * HCI_MAX_ADV_MONITOR_NUM_HANDLES entries — confirm the add path
 * enforces that limit.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
5179 
/* hci_cmd_sync completion callback for Add Adv Patterns Monitor (and
 * its RSSI variant).
 *
 * On success: announces the new monitor to other sockets, bumps the
 * monitor count, marks the monitor registered and refreshes passive
 * scanning. In all cases the issuer gets a command reply carrying the
 * monitor handle, and the pending command is removed.
 */
static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	hci_dev_lock(hdev);

	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5207 
5208 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5209 {
5210 	struct mgmt_pending_cmd *cmd = data;
5211 	struct adv_monitor *monitor = cmd->user_data;
5212 
5213 	return hci_add_adv_monitor(hdev, monitor);
5214 }
5215 
/* Common tail of the Add Adv Patterns Monitor commands.
 *
 * @status carries any failure detected by the caller while parsing the
 * request; a non-zero value short-circuits straight to the error path.
 * Queues mgmt_add_adv_patterns_monitor_sync() with a pending command,
 * rejecting the request as BUSY while conflicting monitor/LE commands
 * are outstanding.
 *
 * Ownership: on any error path the monitor @m is freed here via
 * hci_free_adv_monitor(); on success it is handed to the sync
 * machinery through cmd->user_data.
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	hci_dev_lock(hdev);

	if (status)
		goto unlock;

	/* Serialize against other monitor/LE state changes */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
				 mgmt_add_adv_patterns_monitor_complete);
	if (err) {
		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
5263 
5264 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5265 				   struct mgmt_adv_rssi_thresholds *rssi)
5266 {
5267 	if (rssi) {
5268 		m->rssi.low_threshold = rssi->low_threshold;
5269 		m->rssi.low_threshold_timeout =
5270 		    __le16_to_cpu(rssi->low_threshold_timeout);
5271 		m->rssi.high_threshold = rssi->high_threshold;
5272 		m->rssi.high_threshold_timeout =
5273 		    __le16_to_cpu(rssi->high_threshold_timeout);
5274 		m->rssi.sampling_period = rssi->sampling_period;
5275 	} else {
5276 		/* Default values. These numbers are the least constricting
5277 		 * parameters for MSFT API to work, so it behaves as if there
5278 		 * are no rssi parameter to consider. May need to be changed
5279 		 * if other API are to be supported.
5280 		 */
5281 		m->rssi.low_threshold = -127;
5282 		m->rssi.low_threshold_timeout = 60;
5283 		m->rssi.high_threshold = -127;
5284 		m->rssi.high_threshold_timeout = 0;
5285 		m->rssi.sampling_period = 0;
5286 	}
5287 }
5288 
/* Copy @pattern_count patterns from the mgmt request into monitor @m.
 *
 * Each pattern's offset/length is validated against
 * HCI_MAX_EXT_AD_LENGTH before it is duplicated onto m->patterns.
 *
 * Returns an MGMT status code. On failure, patterns already linked
 * into m->patterns are left there; the caller path releases them via
 * hci_free_adv_monitor().
 */
static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
				    struct mgmt_adv_pattern *patterns)
{
	u8 offset = 0, length = 0;
	struct adv_pattern *p = NULL;
	int i;

	for (i = 0; i < pattern_count; i++) {
		offset = patterns[i].offset;
		length = patterns[i].length;
		/* Pattern must lie entirely within the extended AD data */
		if (offset >= HCI_MAX_EXT_AD_LENGTH ||
		    length > HCI_MAX_EXT_AD_LENGTH ||
		    (offset + length) > HCI_MAX_EXT_AD_LENGTH)
			return MGMT_STATUS_INVALID_PARAMS;

		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return MGMT_STATUS_NO_RESOURCES;

		p->ad_type = patterns[i].ad_type;
		p->offset = patterns[i].offset;
		p->length = patterns[i].length;
		memcpy(p->value, patterns[i].value, p->length);

		INIT_LIST_HEAD(&p->list);
		list_add(&p->list, &m->patterns);
	}

	return MGMT_STATUS_SUCCESS;
}
5319 
5320 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5321 				    void *data, u16 len)
5322 {
5323 	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5324 	struct adv_monitor *m = NULL;
5325 	u8 status = MGMT_STATUS_SUCCESS;
5326 	size_t expected_size = sizeof(*cp);
5327 
5328 	BT_DBG("request for %s", hdev->name);
5329 
5330 	if (len <= sizeof(*cp)) {
5331 		status = MGMT_STATUS_INVALID_PARAMS;
5332 		goto done;
5333 	}
5334 
5335 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5336 	if (len != expected_size) {
5337 		status = MGMT_STATUS_INVALID_PARAMS;
5338 		goto done;
5339 	}
5340 
5341 	m = kzalloc(sizeof(*m), GFP_KERNEL);
5342 	if (!m) {
5343 		status = MGMT_STATUS_NO_RESOURCES;
5344 		goto done;
5345 	}
5346 
5347 	INIT_LIST_HEAD(&m->patterns);
5348 
5349 	parse_adv_monitor_rssi(m, NULL);
5350 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5351 
5352 done:
5353 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5354 					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5355 }
5356 
5357 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5358 					 void *data, u16 len)
5359 {
5360 	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5361 	struct adv_monitor *m = NULL;
5362 	u8 status = MGMT_STATUS_SUCCESS;
5363 	size_t expected_size = sizeof(*cp);
5364 
5365 	BT_DBG("request for %s", hdev->name);
5366 
5367 	if (len <= sizeof(*cp)) {
5368 		status = MGMT_STATUS_INVALID_PARAMS;
5369 		goto done;
5370 	}
5371 
5372 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5373 	if (len != expected_size) {
5374 		status = MGMT_STATUS_INVALID_PARAMS;
5375 		goto done;
5376 	}
5377 
5378 	m = kzalloc(sizeof(*m), GFP_KERNEL);
5379 	if (!m) {
5380 		status = MGMT_STATUS_NO_RESOURCES;
5381 		goto done;
5382 	}
5383 
5384 	INIT_LIST_HEAD(&m->patterns);
5385 
5386 	parse_adv_monitor_rssi(m, &cp->rssi);
5387 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5388 
5389 done:
5390 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5391 					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5392 }
5393 
/* Completion callback for MGMT_OP_REMOVE_ADV_MONITOR: reply to the
 * pending mgmt command and refresh passive scanning on success.
 */
static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
{
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp;

	/* Bail out if the request was cancelled or this command is no
	 * longer the pending one for this opcode.
	 */
	if (status == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev))
		return;

	hci_dev_lock(hdev);

	cp = cmd->param;

	/* Echo the handle from the request back in the response */
	rp.monitor_handle = cp->monitor_handle;

	if (!status)
		hci_update_passive_scan(hdev);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5422 
5423 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5424 {
5425 	struct mgmt_pending_cmd *cmd = data;
5426 
5427 	if (cmd != pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev))
5428 		return -ECANCELED;
5429 
5430 	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5431 	u16 handle = __le16_to_cpu(cp->monitor_handle);
5432 
5433 	if (!handle)
5434 		return hci_remove_all_adv_monitor(hdev);
5435 
5436 	return hci_remove_single_adv_monitor(hdev, handle);
5437 }
5438 
5439 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5440 			      void *data, u16 len)
5441 {
5442 	struct mgmt_pending_cmd *cmd;
5443 	int err, status;
5444 
5445 	hci_dev_lock(hdev);
5446 
5447 	if (pending_find(MGMT_OP_SET_LE, hdev) ||
5448 	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5449 	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5450 	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5451 		status = MGMT_STATUS_BUSY;
5452 		goto unlock;
5453 	}
5454 
5455 	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5456 	if (!cmd) {
5457 		status = MGMT_STATUS_NO_RESOURCES;
5458 		goto unlock;
5459 	}
5460 
5461 	err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
5462 				  mgmt_remove_adv_monitor_complete);
5463 
5464 	if (err) {
5465 		mgmt_pending_remove(cmd);
5466 
5467 		if (err == -ENOMEM)
5468 			status = MGMT_STATUS_NO_RESOURCES;
5469 		else
5470 			status = MGMT_STATUS_FAILED;
5471 
5472 		goto unlock;
5473 	}
5474 
5475 	hci_dev_unlock(hdev);
5476 
5477 	return 0;
5478 
5479 unlock:
5480 	hci_dev_unlock(hdev);
5481 	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5482 			       status);
5483 }
5484 
/* Completion callback for MGMT_OP_READ_LOCAL_OOB_DATA: translate the HCI
 * response skb into a mgmt response carrying the P-192 (and, with BR/EDR
 * Secure Connections, P-256) hash and randomizer values.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Even on transport success the skb may be missing, an ERR_PTR,
	 * or carry a non-zero HCI status in its first byte.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		/* Legacy response: only the P-192 values are present */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Trim the unused P-256 fields from the mgmt response */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended response: both P-192 and P-256 values */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	/* Only free a real skb, never an ERR_PTR */
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
5551 
5552 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5553 {
5554 	struct mgmt_pending_cmd *cmd = data;
5555 
5556 	if (bredr_sc_enabled(hdev))
5557 		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5558 	else
5559 		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5560 
5561 	if (IS_ERR(cmd->skb))
5562 		return PTR_ERR(cmd->skb);
5563 	else
5564 		return 0;
5565 }
5566 
5567 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5568 			       void *data, u16 data_len)
5569 {
5570 	struct mgmt_pending_cmd *cmd;
5571 	int err;
5572 
5573 	bt_dev_dbg(hdev, "sock %p", sk);
5574 
5575 	hci_dev_lock(hdev);
5576 
5577 	if (!hdev_is_powered(hdev)) {
5578 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5579 				      MGMT_STATUS_NOT_POWERED);
5580 		goto unlock;
5581 	}
5582 
5583 	if (!lmp_ssp_capable(hdev)) {
5584 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5585 				      MGMT_STATUS_NOT_SUPPORTED);
5586 		goto unlock;
5587 	}
5588 
5589 	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5590 	if (!cmd)
5591 		err = -ENOMEM;
5592 	else
5593 		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5594 					 read_local_oob_data_complete);
5595 
5596 	if (err < 0) {
5597 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5598 				      MGMT_STATUS_FAILED);
5599 
5600 		if (cmd)
5601 			mgmt_pending_free(cmd);
5602 	}
5603 
5604 unlock:
5605 	hci_dev_unlock(hdev);
5606 	return err;
5607 }
5608 
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store out-of-band pairing data
 * for a remote device. Two request sizes are accepted: the legacy one
 * carrying only P-192 values and the extended one that also carries
 * P-256 values.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* The legacy request only carries P-192 data, which is
		 * accepted for BR/EDR addresses only.
		 */
		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5716 
5717 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5718 				  void *data, u16 len)
5719 {
5720 	struct mgmt_cp_remove_remote_oob_data *cp = data;
5721 	u8 status;
5722 	int err;
5723 
5724 	bt_dev_dbg(hdev, "sock %p", sk);
5725 
5726 	if (cp->addr.type != BDADDR_BREDR)
5727 		return mgmt_cmd_complete(sk, hdev->id,
5728 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5729 					 MGMT_STATUS_INVALID_PARAMS,
5730 					 &cp->addr, sizeof(cp->addr));
5731 
5732 	hci_dev_lock(hdev);
5733 
5734 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5735 		hci_remote_oob_data_clear(hdev);
5736 		status = MGMT_STATUS_SUCCESS;
5737 		goto done;
5738 	}
5739 
5740 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5741 	if (err < 0)
5742 		status = MGMT_STATUS_INVALID_PARAMS;
5743 	else
5744 		status = MGMT_STATUS_SUCCESS;
5745 
5746 done:
5747 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5748 				status, &cp->addr, sizeof(cp->addr));
5749 
5750 	hci_dev_unlock(hdev);
5751 	return err;
5752 }
5753 
5754 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5755 				    uint8_t *mgmt_status)
5756 {
5757 	switch (type) {
5758 	case DISCOV_TYPE_LE:
5759 		*mgmt_status = mgmt_le_support(hdev);
5760 		if (*mgmt_status)
5761 			return false;
5762 		break;
5763 	case DISCOV_TYPE_INTERLEAVED:
5764 		*mgmt_status = mgmt_le_support(hdev);
5765 		if (*mgmt_status)
5766 			return false;
5767 		fallthrough;
5768 	case DISCOV_TYPE_BREDR:
5769 		*mgmt_status = mgmt_bredr_support(hdev);
5770 		if (*mgmt_status)
5771 			return false;
5772 		break;
5773 	default:
5774 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5775 		return false;
5776 	}
5777 
5778 	return true;
5779 }
5780 
5781 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5782 {
5783 	struct mgmt_pending_cmd *cmd = data;
5784 
5785 	bt_dev_dbg(hdev, "err %d", err);
5786 
5787 	if (err == -ECANCELED)
5788 		return;
5789 
5790 	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5791 	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5792 	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5793 		return;
5794 
5795 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5796 			  cmd->param, 1);
5797 	mgmt_pending_remove(cmd);
5798 
5799 	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
5800 				DISCOVERY_FINDING);
5801 }
5802 
/* hci_cmd_sync work item: kick off controller discovery */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}
5807 
/* Common implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY: validate state and discovery type,
 * then queue the asynchronous discovery start. @op selects which mgmt
 * opcode is being handled.
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery may run at a time, and periodic inquiry
	 * excludes regular discovery.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	/* The reply is sent from start_discovery_complete() */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5878 
/* MGMT_OP_START_DISCOVERY handler: thin wrapper for regular discovery */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
5885 
/* MGMT_OP_START_LIMITED_DISCOVERY handler: thin wrapper for limited
 * discovery.
 */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
5893 
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery but with
 * result filtering by RSSI and an optional list of 128-bit service UUIDs
 * appended to the fixed-size command parameters.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound on uuid_count such that the total length fits in u16 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery may run at a time, and periodic inquiry
	 * excludes regular discovery.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The request must carry exactly uuid_count 16-byte UUIDs */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		/* Ownership of this copy is held by hdev->discovery and
		 * released by hci_discovery_filter_clear().
		 */
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	/* The reply is sent from start_discovery_complete() */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
6005 
/* Completion callback for MGMT_OP_STOP_DISCOVERY: answer the pending
 * command and mark discovery stopped on success.
 */
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Ignore cancelled or superseded requests */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* Echo the discovery type (first parameter byte) back */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
6023 
/* hci_cmd_sync work item: stop controller discovery */
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_stop_discovery_sync(hdev);
}
6028 
6029 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6030 			  u16 len)
6031 {
6032 	struct mgmt_cp_stop_discovery *mgmt_cp = data;
6033 	struct mgmt_pending_cmd *cmd;
6034 	int err;
6035 
6036 	bt_dev_dbg(hdev, "sock %p", sk);
6037 
6038 	hci_dev_lock(hdev);
6039 
6040 	if (!hci_discovery_active(hdev)) {
6041 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6042 					MGMT_STATUS_REJECTED, &mgmt_cp->type,
6043 					sizeof(mgmt_cp->type));
6044 		goto unlock;
6045 	}
6046 
6047 	if (hdev->discovery.type != mgmt_cp->type) {
6048 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6049 					MGMT_STATUS_INVALID_PARAMS,
6050 					&mgmt_cp->type, sizeof(mgmt_cp->type));
6051 		goto unlock;
6052 	}
6053 
6054 	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6055 	if (!cmd) {
6056 		err = -ENOMEM;
6057 		goto unlock;
6058 	}
6059 
6060 	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6061 				 stop_discovery_complete);
6062 	if (err < 0) {
6063 		mgmt_pending_remove(cmd);
6064 		goto unlock;
6065 	}
6066 
6067 	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6068 
6069 unlock:
6070 	hci_dev_unlock(hdev);
6071 	return err;
6072 }
6073 
6074 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6075 			u16 len)
6076 {
6077 	struct mgmt_cp_confirm_name *cp = data;
6078 	struct inquiry_entry *e;
6079 	int err;
6080 
6081 	bt_dev_dbg(hdev, "sock %p", sk);
6082 
6083 	hci_dev_lock(hdev);
6084 
6085 	if (!hci_discovery_active(hdev)) {
6086 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6087 					MGMT_STATUS_FAILED, &cp->addr,
6088 					sizeof(cp->addr));
6089 		goto failed;
6090 	}
6091 
6092 	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6093 	if (!e) {
6094 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6095 					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6096 					sizeof(cp->addr));
6097 		goto failed;
6098 	}
6099 
6100 	if (cp->name_known) {
6101 		e->name_state = NAME_KNOWN;
6102 		list_del(&e->list);
6103 	} else {
6104 		e->name_state = NAME_NEEDED;
6105 		hci_inquiry_cache_update_resolve(hdev, e);
6106 	}
6107 
6108 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6109 				&cp->addr, sizeof(cp->addr));
6110 
6111 failed:
6112 	hci_dev_unlock(hdev);
6113 	return err;
6114 }
6115 
6116 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6117 			u16 len)
6118 {
6119 	struct mgmt_cp_block_device *cp = data;
6120 	u8 status;
6121 	int err;
6122 
6123 	bt_dev_dbg(hdev, "sock %p", sk);
6124 
6125 	if (!bdaddr_type_is_valid(cp->addr.type))
6126 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6127 					 MGMT_STATUS_INVALID_PARAMS,
6128 					 &cp->addr, sizeof(cp->addr));
6129 
6130 	hci_dev_lock(hdev);
6131 
6132 	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6133 				  cp->addr.type);
6134 	if (err < 0) {
6135 		status = MGMT_STATUS_FAILED;
6136 		goto done;
6137 	}
6138 
6139 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6140 		   sk);
6141 	status = MGMT_STATUS_SUCCESS;
6142 
6143 done:
6144 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6145 				&cp->addr, sizeof(cp->addr));
6146 
6147 	hci_dev_unlock(hdev);
6148 
6149 	return err;
6150 }
6151 
6152 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6153 			  u16 len)
6154 {
6155 	struct mgmt_cp_unblock_device *cp = data;
6156 	u8 status;
6157 	int err;
6158 
6159 	bt_dev_dbg(hdev, "sock %p", sk);
6160 
6161 	if (!bdaddr_type_is_valid(cp->addr.type))
6162 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6163 					 MGMT_STATUS_INVALID_PARAMS,
6164 					 &cp->addr, sizeof(cp->addr));
6165 
6166 	hci_dev_lock(hdev);
6167 
6168 	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6169 				  cp->addr.type);
6170 	if (err < 0) {
6171 		status = MGMT_STATUS_INVALID_PARAMS;
6172 		goto done;
6173 	}
6174 
6175 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6176 		   sk);
6177 	status = MGMT_STATUS_SUCCESS;
6178 
6179 done:
6180 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6181 				&cp->addr, sizeof(cp->addr));
6182 
6183 	hci_dev_unlock(hdev);
6184 
6185 	return err;
6186 }
6187 
/* hci_cmd_sync work item: refresh EIR data after a Device ID change */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}
6192 
6193 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6194 			 u16 len)
6195 {
6196 	struct mgmt_cp_set_device_id *cp = data;
6197 	int err;
6198 	__u16 source;
6199 
6200 	bt_dev_dbg(hdev, "sock %p", sk);
6201 
6202 	source = __le16_to_cpu(cp->source);
6203 
6204 	if (source > 0x0002)
6205 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6206 				       MGMT_STATUS_INVALID_PARAMS);
6207 
6208 	hci_dev_lock(hdev);
6209 
6210 	hdev->devid_source = source;
6211 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6212 	hdev->devid_product = __le16_to_cpu(cp->product);
6213 	hdev->devid_version = __le16_to_cpu(cp->version);
6214 
6215 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6216 				NULL, 0);
6217 
6218 	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6219 
6220 	hci_dev_unlock(hdev);
6221 
6222 	return err;
6223 }
6224 
/* Log the outcome of re-enabling instance advertising */
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (!err) {
		bt_dev_dbg(hdev, "status %d", err);
		return;
	}

	bt_dev_err(hdev, "failed to re-configure advertising %d", err);
}
6232 
/* Completion callback for MGMT_OP_SET_ADVERTISING: propagate the result
 * to all pending commands, sync the HCI_ADVERTISING flag with the
 * controller state and, when the setting was just disabled, restore any
 * previously configured instance advertising.
 */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		/* Fail every pending Set Advertising command */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	/* Mirror the controller's actual advertising state in the flag */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		/* No current instance: fall back to the first configured one */
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}
6280 
/* hci_cmd_sync work for MGMT_OP_SET_ADVERTISING: apply the requested
 * advertising mode (0x00 off, 0x01 on, 0x02 on and connectable) to the
 * controller.
 */
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	/* Value 0x02 additionally requests connectable advertising */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			/* Legacy advertising: refresh data before enabling */
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}
6314 
/* MGMT_OP_SET_ADVERTISING handler: enable/disable LE advertising
 * (0x00/0x01) or enable connectable advertising (0x02). When no HCI
 * traffic is needed the flags are toggled and the reply sent directly;
 * otherwise the change is queued through hci_cmd_sync.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Advertising may be paused internally; refuse changes meanwhile */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_dev_test_flag(hdev, HCI_MESH) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Broadcast New Settings only if a flag actually changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6399 
/* MGMT_OP_SET_STATIC_ADDRESS handler.
 *
 * Stores the LE static random address that the controller will be
 * configured with.  Only permitted on LE capable controllers and only
 * while the controller is powered off.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* The address is programmed at power on, so it can only be
	 * changed while powered off.
	 */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY disables the static address.  Any other value must
	 * be a well-formed static random address: not BDADDR_NONE and
	 * with the static-address bit pattern in the top bits.
	 */
	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	/* Broadcast the settings change to other mgmt sockets */
	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6443 
/* MGMT_OP_SET_SCAN_PARAMS handler.
 *
 * Updates the LE scan interval and window used for passive/background
 * scanning.  Both values are validated against the 0x0004-0x4000 range
 * accepted here, and the window may not exceed the interval.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The scan window is the active portion of each interval and
	 * therefore cannot be larger than the interval itself.
	 */
	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED)
		hci_update_passive_scan(hdev);

	hci_dev_unlock(hdev);

	return err;
}
6492 
/* Completion callback for the queued write_fast_connectable_sync work.
 *
 * On success, updates the HCI_FAST_CONNECTABLE flag to the requested
 * value and notifies the requester plus other mgmt sockets; on failure
 * only a status is returned.  Frees the pending command either way.
 */
static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(err));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}
6516 
6517 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6518 {
6519 	struct mgmt_pending_cmd *cmd = data;
6520 	struct mgmt_mode *cp = cmd->param;
6521 
6522 	return hci_write_fast_connectable_sync(hdev, cp->val);
6523 }
6524 
/* MGMT_OP_SET_FAST_CONNECTABLE handler.
 *
 * Toggles the fast connectable (reduced page scan interval) setting.
 * Requires BR/EDR enabled and controller version 1.2 or later.  When
 * powered off only the flag is flipped; when powered on the change is
 * queued to the controller and finalized in fast_connectable_complete.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just reply with the current settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* While powered off no HCI traffic is needed; flip the flag and
	 * notify directly.
	 */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6580 
/* Completion callback for the queued set_bredr_sync work.
 *
 * set_bredr() optimistically sets HCI_BREDR_ENABLED before queuing the
 * HCI work, so on error the flag must be rolled back here before the
 * failure status is reported.  Frees the pending command either way.
 */
static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}
6603 
6604 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6605 {
6606 	int status;
6607 
6608 	status = hci_write_fast_connectable_sync(hdev, false);
6609 
6610 	if (!status)
6611 		status = hci_update_scan_sync(hdev);
6612 
6613 	/* Since only the advertising data flags will change, there
6614 	 * is no need to update the scan response data.
6615 	 */
6616 	if (!status)
6617 		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6618 
6619 	return status;
6620 }
6621 
/* MGMT_OP_SET_BREDR handler.
 *
 * Enables or disables BR/EDR support on a dual-mode controller.  While
 * powered off only flags are updated; while powered on, disabling is
 * rejected and enabling is queued as HCI work (set_bredr_sync) with
 * HCI_BREDR_ENABLED set optimistically beforehand.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just reply with the current settings */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also invalidates the BR/EDR specific
		 * settings, so clear them along with it.
		 */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 * On failure the flag is restored in set_bredr_complete.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6721 
/* Completion callback for the queued set_secure_conn_sync work.
 *
 * On success, translates the requested mode (0x00 off, 0x01 SC enabled,
 * 0x02 SC only) into the HCI_SC_ENABLED/HCI_SC_ONLY flags and notifies
 * the requester and other mgmt sockets.  Frees the pending command.
 */
static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
}
6759 
6760 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6761 {
6762 	struct mgmt_pending_cmd *cmd = data;
6763 	struct mgmt_mode *cp = cmd->param;
6764 	u8 val = !!cp->val;
6765 
6766 	/* Force write of val */
6767 	hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6768 
6769 	return hci_write_sc_support_sync(hdev, val);
6770 }
6771 
/* MGMT_OP_SET_SECURE_CONN handler.
 *
 * Configures Secure Connections mode: 0x00 disabled, 0x01 enabled,
 * 0x02 SC-only.  When no BR/EDR controller interaction is possible
 * (powered off, not SC capable, or BR/EDR disabled) only the host
 * flags are updated; otherwise the write is queued as HCI work.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* For BR/EDR, Secure Connections builds on SSP being enabled */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Without a powered, SC capable BR/EDR controller there is no
	 * HCI traffic needed; just toggle the host flags and reply.
	 */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* No change in either flag: just reply with current settings */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6852 
/* MGMT_OP_SET_DEBUG_KEYS handler.
 *
 * 0x00 discards debug keys, 0x01 keeps them, 0x02 additionally makes
 * the controller generate debug keys (HCI_USE_DEBUG_KEYS).  If the SSP
 * debug mode usage changed on a powered controller with SSP enabled,
 * the new mode is sent to the controller.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Push the new SSP debug mode to the controller when its usage
	 * actually changed and the controller can act on it now.
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6899 
/* MGMT_OP_SET_PRIVACY handler.
 *
 * Enables (0x01), enables limited (0x02) or disables (0x00) LE privacy
 * and installs/clears the local IRK.  Only allowed while powered off,
 * since the address configuration takes effect at power on.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Mark the RPA as expired so a fresh one is generated */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6956 
6957 static bool irk_is_valid(struct mgmt_irk_info *irk)
6958 {
6959 	switch (irk->addr.type) {
6960 	case BDADDR_LE_PUBLIC:
6961 		return true;
6962 
6963 	case BDADDR_LE_RANDOM:
6964 		/* Two most significant bits shall be set */
6965 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6966 			return false;
6967 		return true;
6968 	}
6969 
6970 	return false;
6971 }
6972 
/* MGMT_OP_LOAD_IRKS handler.
 *
 * Replaces the whole IRK store with the list supplied by user space.
 * The command length is validated against the declared count, every
 * entry is validated up front, and blocked IRKs are skipped with a
 * warning while loading.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Largest count that can fit in a 16-bit mgmt message */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload size must match the declared entry count exactly */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate all entries before touching the existing IRK store */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	/* User space handing us IRKs implies it can handle resolving */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7043 
7044 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7045 {
7046 	if (key->initiator != 0x00 && key->initiator != 0x01)
7047 		return false;
7048 
7049 	switch (key->addr.type) {
7050 	case BDADDR_LE_PUBLIC:
7051 		return true;
7052 
7053 	case BDADDR_LE_RANDOM:
7054 		/* Two most significant bits shall be set */
7055 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7056 			return false;
7057 		return true;
7058 	}
7059 
7060 	return false;
7061 }
7062 
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler.
 *
 * Replaces the whole LTK store with the list supplied by user space.
 * Unlike load_irks(), invalid and blocked entries are skipped with a
 * warning rather than failing the whole command.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Largest count that can fit in a 16-bit mgmt message */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload size must match the declared entry count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		if (!ltk_is_valid(key)) {
			bt_dev_warn(hdev, "Invalid LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Map the mgmt key type to the SMP key type and
		 * authentication level.
		 */
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* P256 debug keys fall through to the default
			 * case and are therefore never stored.
			 */
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7155 
/* Completion callback for the queued get_conn_info_sync work.
 *
 * Replies to the requester with the refreshed RSSI and TX power values
 * cached in the connection, or with invalid sentinel values on failure.
 * Frees the pending command either way.
 */
static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct mgmt_rp_get_conn_info rp;
	u8 status;

	bt_dev_dbg(hdev, "err %d", err);

	/* Echo back the address the request was made for */
	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));

	status = mgmt_status(err);
	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
			  &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}
7184 
/* hci_cmd_sync work for GET_CONN_INFO.
 *
 * Re-resolves the connection (it may have dropped since the command was
 * queued), then refreshes the RSSI and, where still unknown or subject
 * to change, the current and maximum TX power readings.
 */
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16   handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* Stash the connection for get_conn_info_complete() */
	cmd->user_data = conn;
	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}
7222 
/* MGMT_OP_GET_CONN_INFO handler.
 *
 * Returns RSSI and TX power information for an active connection.
 * Cached values are returned directly while they are considered fresh;
 * otherwise a controller query is queued and answered asynchronously
 * in get_conn_info_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Pick the link type matching the requested address type */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
						 hdev->conn_info_max_age - 1);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd) {
			err = -ENOMEM;
		} else {
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);
		}

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7313 
/* Completion callback for the queued get_clock_info_sync work.
 *
 * Replies with the local clock and, when a connection was resolved,
 * the piconet clock and its accuracy.  On error only the address is
 * echoed back with the failure status.  Frees the pending command.
 */
static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct mgmt_rp_get_clock_info rp;
	struct hci_conn *conn = cmd->user_data;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (err)
		goto complete;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
			  sizeof(rp));

	mgmt_pending_free(cmd);
}
7344 
/* hci_cmd_sync work for GET_CLOCK_INFO.
 *
 * First reads the local clock (which = 0x00 via the zeroed command),
 * then, if the connection still exists, the piconet clock for that
 * connection's handle.
 */
static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn;

	memset(&hci_cp, 0, sizeof(hci_cp));
	/* NOTE(review): the return value of this local clock read is
	 * ignored — presumably best-effort, since the piconet clock read
	 * below still determines the overall result.  Confirm intended.
	 */
	hci_read_clock_sync(hdev, &hci_cp);

	/* Make sure connection still exists */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* Stash the connection for get_clock_info_complete() */
	cmd->user_data = conn;
	hci_cp.handle = cpu_to_le16(conn->handle);
	hci_cp.which = 0x01; /* Piconet clock */

	return hci_read_clock_sync(hdev, &hci_cp);
}
7366 
/* MGMT_OP_GET_CLOCK_INFO handler.
 *
 * Returns local and (for an active BR/EDR connection) piconet clock
 * information.  BDADDR_ANY requests only the local clock.  The actual
 * reads are queued as HCI work and answered asynchronously in
 * get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
								u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information only exists for BR/EDR links */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-ANY address must refer to an active connection */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);
	}


unlock:
	hci_dev_unlock(hdev);
	return err;
}
7430 
7431 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7432 {
7433 	struct hci_conn *conn;
7434 
7435 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7436 	if (!conn)
7437 		return false;
7438 
7439 	if (conn->dst_type != type)
7440 		return false;
7441 
7442 	if (conn->state != BT_CONNECTED)
7443 		return false;
7444 
7445 	return true;
7446 }
7447 
/* This function requires the caller holds hdev->lock */
/* Set the auto-connect policy for a connection-parameters entry,
 * creating the entry if necessary, and move it onto the pending
 * connection or report list that matches the new policy.
 *
 * Returns 0 on success (including no-op when the policy is unchanged)
 * and -EIO when the parameters entry could not be created.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from the current pending list before re-filing below */
	hci_pend_le_list_del_init(params);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* An in-progress explicit connect takes precedence over
		 * the report-only policy.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		else
			hci_pend_le_list_add(params, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connection attempt if one is not active */
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
7492 
7493 static void device_added(struct sock *sk, struct hci_dev *hdev,
7494 			 bdaddr_t *bdaddr, u8 type, u8 action)
7495 {
7496 	struct mgmt_ev_device_added ev;
7497 
7498 	bacpy(&ev.addr.bdaddr, bdaddr);
7499 	ev.addr.type = type;
7500 	ev.action = action;
7501 
7502 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7503 }
7504 
/* Completion handler for Add Device: on success emit Device Added and
 * Device Flags Changed events, then reply to the command and release
 * the pending entry.
 */
static void add_device_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_device *cp = cmd->param;

	if (!err) {
		struct hci_conn_params *params;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));

		device_added(cmd->sk, hdev, &cp->addr.bdaddr, cp->addr.type,
			     cp->action);
		/* params may be NULL; report zero current flags then */
		device_flags_changed(NULL, hdev, &cp->addr.bdaddr,
				     cp->addr.type, hdev->conn_flags,
				     params ? params->flags : 0);
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_ADD_DEVICE,
			  mgmt_status(err), &cp->addr, sizeof(cp->addr));
	mgmt_pending_free(cmd);
}
7527 
/* hci_cmd_sync work for Add Device: refresh passive scanning state */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7532 
/* Handle the MGMT Add Device command: register a BR/EDR accept-list
 * entry or LE connection parameters with the requested auto-connect
 * action, then queue a passive-scan update for LE entries.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The address must have a valid type and be non-zero */
	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	/* Only actions 0x00-0x02 are defined */
	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		hci_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action onto the kernel auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->flags;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* For LE entries the events and reply are emitted from
	 * add_device_complete() after the passive-scan update.
	 */
	err = hci_cmd_sync_queue(hdev, add_device_sync, cmd,
				 add_device_complete);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		mgmt_pending_free(cmd);
	}

	goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	supported_flags = hdev->conn_flags;
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7648 
7649 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7650 			   bdaddr_t *bdaddr, u8 type)
7651 {
7652 	struct mgmt_ev_device_removed ev;
7653 
7654 	bacpy(&ev.addr.bdaddr, bdaddr);
7655 	ev.addr.type = type;
7656 
7657 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7658 }
7659 
/* hci_cmd_sync work for Remove Device: refresh passive scanning state */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7664 
/* Handle the MGMT Remove Device command: delete a single BR/EDR
 * accept-list entry or LE connection parameters, or, when BDADDR_ANY
 * is given, flush all removable entries at once.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			/* No LE state was touched, so skip the
			 * passive-scan update below.
			 */
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Disabled or explicit-connect-only entries are not
		 * removable through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		hci_conn_params_free(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* Wildcard removal only accepts address type 0x00 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries with an explicit connect attempt
			 * in flight, but demote them to explicit-only.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			hci_conn_params_free(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
7788 
7789 static int conn_update_sync(struct hci_dev *hdev, void *data)
7790 {
7791 	struct hci_conn_params *params = data;
7792 	struct hci_conn *conn;
7793 
7794 	conn = hci_conn_hash_lookup_le(hdev, &params->addr, params->addr_type);
7795 	if (!conn)
7796 		return -ECANCELED;
7797 
7798 	return hci_le_conn_update_sync(hdev, conn, params);
7799 }
7800 
/* Handle the MGMT Load Connection Parameters command: store the LE
 * connection parameter entries supplied by userspace, triggering a
 * connection update when a single existing entry is reloaded.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Largest count whose total encoded size still fits in u16 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must exactly match the declared count */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	/* A bulk load starts by dropping all disabled entries */
	if (param_count > 1)
		hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		bool update = false;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		/* Detect when the loading is for an existing parameter then
		 * attempt to trigger the connection update procedure.
		 */
		if (!i && param_count == 1) {
			hci_param = hci_conn_params_lookup(hdev,
							   &param->addr.bdaddr,
							   addr_type);
			if (hci_param)
				update = true;
			else
				hci_conn_params_clear_disabled(hdev);
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;

		/* Check if we need to trigger a connection update */
		if (update) {
			struct hci_conn *conn;

			/* Lookup for existing connection as central and check
			 * if parameters match and if they don't then trigger
			 * a connection update.
			 */
			conn = hci_conn_hash_lookup_le(hdev, &hci_param->addr,
						       addr_type);
			if (conn && conn->role == HCI_ROLE_MASTER &&
			    (conn->le_conn_min_interval != min ||
			     conn->le_conn_max_interval != max ||
			     conn->le_conn_latency != latency ||
			     conn->le_supv_timeout != timeout))
				hci_cmd_sync_queue(hdev, conn_update_sync,
						   hci_param, NULL);
		}
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
7919 
7920 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7921 			       void *data, u16 len)
7922 {
7923 	struct mgmt_cp_set_external_config *cp = data;
7924 	bool changed;
7925 	int err;
7926 
7927 	bt_dev_dbg(hdev, "sock %p", sk);
7928 
7929 	if (hdev_is_powered(hdev))
7930 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7931 				       MGMT_STATUS_REJECTED);
7932 
7933 	if (cp->config != 0x00 && cp->config != 0x01)
7934 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7935 				         MGMT_STATUS_INVALID_PARAMS);
7936 
7937 	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7938 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7939 				       MGMT_STATUS_NOT_SUPPORTED);
7940 
7941 	hci_dev_lock(hdev);
7942 
7943 	if (cp->config)
7944 		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7945 	else
7946 		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7947 
7948 	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7949 	if (err < 0)
7950 		goto unlock;
7951 
7952 	if (!changed)
7953 		goto unlock;
7954 
7955 	err = new_options(hdev, sk);
7956 
7957 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7958 		mgmt_index_removed(hdev);
7959 
7960 		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7961 			hci_dev_set_flag(hdev, HCI_CONFIG);
7962 			hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7963 
7964 			queue_work(hdev->req_workqueue, &hdev->power_on);
7965 		} else {
7966 			set_bit(HCI_RAW, &hdev->flags);
7967 			mgmt_index_added(hdev);
7968 		}
7969 	}
7970 
7971 unlock:
7972 	hci_dev_unlock(hdev);
7973 	return err;
7974 }
7975 
/* Handle the MGMT Set Public Address command: record the address to be
 * programmed via the driver's set_bdaddr hook and, if the device
 * thereby becomes fully configured, start the power-on sequence.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The address can only be changed while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Requires driver support for programming the address */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
8027 
/* Completion handler for Read Local OOB Extended Data: translate the
 * HCI response into EIR-formatted OOB data, reply to the pending
 * command and broadcast an OOB Data Updated event on success.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	/* Bail out if the request was cancelled or is no longer pending */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
		return;

	/* Derive the status from the HCI response when err itself is ok */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		/* NOTE(review): status already went through mgmt_status()
		 * above, so this second mapping looks redundant - confirm.
		 */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		/* Without Secure Connections only the P-192 values exist */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Secure Connections: P-256 values, plus P-192 unless
		 * SC-only mode is enabled.
		 */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	/* No reply is sent if the allocation fails */
	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
8151 
8152 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8153 				  struct mgmt_cp_read_local_oob_ext_data *cp)
8154 {
8155 	struct mgmt_pending_cmd *cmd;
8156 	int err;
8157 
8158 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8159 			       cp, sizeof(*cp));
8160 	if (!cmd)
8161 		return -ENOMEM;
8162 
8163 	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8164 				 read_local_oob_ext_data_complete);
8165 
8166 	if (err < 0) {
8167 		mgmt_pending_remove(cmd);
8168 		return err;
8169 	}
8170 
8171 	return 0;
8172 }
8173 
/* Handle the MGMT Read Local OOB Extended Data command: build
 * EIR-formatted out-of-band pairing data for BR/EDR or LE; BR/EDR SSP
 * data is fetched asynchronously from the controller.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pre-compute the worst-case EIR length for the reply buffer */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* SSP data comes from the controller; the reply is
			 * sent from the request's completion handler.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Use the static address (type byte 0x01) when forced or
		 * when no public address is usable, otherwise the public
		 * address (type byte 0x00).
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
8334 
8335 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8336 {
8337 	u32 flags = 0;
8338 
8339 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
8340 	flags |= MGMT_ADV_FLAG_DISCOV;
8341 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8342 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8343 	flags |= MGMT_ADV_FLAG_APPEARANCE;
8344 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8345 	flags |= MGMT_ADV_PARAM_DURATION;
8346 	flags |= MGMT_ADV_PARAM_TIMEOUT;
8347 	flags |= MGMT_ADV_PARAM_INTERVALS;
8348 	flags |= MGMT_ADV_PARAM_TX_POWER;
8349 	flags |= MGMT_ADV_PARAM_SCAN_RSP;
8350 
8351 	/* In extended adv TX_POWER returned from Set Adv Param
8352 	 * will be always valid.
8353 	 */
8354 	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8355 		flags |= MGMT_ADV_FLAG_TX_POWER;
8356 
8357 	if (ext_adv_capable(hdev)) {
8358 		flags |= MGMT_ADV_FLAG_SEC_1M;
8359 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8360 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8361 
8362 		if (le_2m_capable(hdev))
8363 			flags |= MGMT_ADV_FLAG_SEC_2M;
8364 
8365 		if (le_coded_capable(hdev))
8366 			flags |= MGMT_ADV_FLAG_SEC_CODED;
8367 	}
8368 
8369 	return flags;
8370 }
8371 
/* Handle the MGMT Read Advertising Features command: report supported
 * advertising flags, data-length limits and the configured instances.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One instance-number byte per configured instance */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = max_adv_len(hdev);
	rp->max_scan_rsp_len = max_adv_len(hdev);
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		/* Only instances 1-le_num_of_adv_sets are externally visible */
		/* NOTE(review): the comment above mentions le_num_of_adv_sets
		 * but the code compares against adv_instance_cnt - confirm
		 * which bound is intended.
		 */
		if (adv_instance->instance <= hdev->adv_instance_cnt) {
			*instance = adv_instance->instance;
			instance++;
		} else {
			rp->num_instances--;
			rp_len--;
		}
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
8426 
/* Number of bytes the local name occupies when encoded as an EIR
 * field, used to budget scan-response space.
 */
static u8 calculate_name_len(struct hci_dev *hdev)
{
	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */

	return eir_append_local_name(hdev, buf, 0);
}
8433 
/* Maximum TLV payload the caller may supply for advertising or scan
 * response data after reserving space for kernel-managed fields.
 */
static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
			   bool is_adv_data)
{
	u8 max_len = max_adv_len(hdev);

	if (is_adv_data) {
		/* Reserve 3 bytes for a kernel-managed Flags field */
		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
				 MGMT_ADV_FLAG_LIMITED_DISCOV |
				 MGMT_ADV_FLAG_MANAGED_FLAGS))
			max_len -= 3;

		/* Reserve 3 bytes for a kernel-managed TX power field */
		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
			max_len -= 3;
	} else {
		/* Scan response: reserve room for the local name ... */
		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
			max_len -= calculate_name_len(hdev);

		/* ... and 4 bytes for the appearance field */
		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
			max_len -= 4;
	}

	return max_len;
}
8457 
8458 static bool flags_managed(u32 adv_flags)
8459 {
8460 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8461 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
8462 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
8463 }
8464 
8465 static bool tx_power_managed(u32 adv_flags)
8466 {
8467 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8468 }
8469 
8470 static bool name_managed(u32 adv_flags)
8471 {
8472 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8473 }
8474 
8475 static bool appearance_managed(u32 adv_flags)
8476 {
8477 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8478 }
8479 
8480 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8481 			      u8 len, bool is_adv_data)
8482 {
8483 	int i, cur_len;
8484 	u8 max_len;
8485 
8486 	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8487 
8488 	if (len > max_len)
8489 		return false;
8490 
8491 	/* Make sure that the data is correctly formatted. */
8492 	for (i = 0; i < len; i += (cur_len + 1)) {
8493 		cur_len = data[i];
8494 
8495 		if (!cur_len)
8496 			continue;
8497 
8498 		if (data[i + 1] == EIR_FLAGS &&
8499 		    (!is_adv_data || flags_managed(adv_flags)))
8500 			return false;
8501 
8502 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8503 			return false;
8504 
8505 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8506 			return false;
8507 
8508 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8509 			return false;
8510 
8511 		if (data[i + 1] == EIR_APPEARANCE &&
8512 		    appearance_managed(adv_flags))
8513 			return false;
8514 
8515 		/* If the current field length would exceed the total data
8516 		 * length, then it's invalid.
8517 		 */
8518 		if (i + cur_len >= len)
8519 			return false;
8520 	}
8521 
8522 	return true;
8523 }
8524 
8525 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8526 {
8527 	u32 supported_flags, phy_flags;
8528 
8529 	/* The current implementation only supports a subset of the specified
8530 	 * flags. Also need to check mutual exclusiveness of sec flags.
8531 	 */
8532 	supported_flags = get_supported_adv_flags(hdev);
8533 	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8534 	if (adv_flags & ~supported_flags ||
8535 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8536 		return false;
8537 
8538 	return true;
8539 }
8540 
8541 static bool adv_busy(struct hci_dev *hdev)
8542 {
8543 	return pending_find(MGMT_OP_SET_LE, hdev);
8544 }
8545 
/* Finalize pending advertising instances after an add request completed:
 * on success mark every pending instance as live; on failure remove the
 * pending instances and tell userspace they are gone.
 *
 * NOTE(review): the @instance parameter is never used — the loop-local
 * variable of the same name shadows it.
 */
static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
			     int err)
{
	struct adv_info *adv, *n;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	/* _safe variant because failing instances are removed mid-walk */
	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance;

		if (!adv->pending)
			continue;

		if (!err) {
			adv->pending = false;
			continue;
		}

		instance = adv->instance;

		/* Stop the rotation timer if it is driving this instance */
		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(sk, hdev, instance);
	}

	hci_dev_unlock(hdev);
}
8577 
8578 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8579 {
8580 	struct mgmt_pending_cmd *cmd = data;
8581 	struct mgmt_cp_add_advertising *cp = cmd->param;
8582 	struct mgmt_rp_add_advertising rp;
8583 
8584 	memset(&rp, 0, sizeof(rp));
8585 
8586 	rp.instance = cp->instance;
8587 
8588 	if (err)
8589 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8590 				mgmt_status(err));
8591 	else
8592 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8593 				  mgmt_status(err), &rp, sizeof(rp));
8594 
8595 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8596 
8597 	mgmt_pending_free(cmd);
8598 }
8599 
8600 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8601 {
8602 	struct mgmt_pending_cmd *cmd = data;
8603 	struct mgmt_cp_add_advertising *cp = cmd->param;
8604 
8605 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8606 }
8607 
/* Handle MGMT_OP_ADD_ADVERTISING: validate the request, register the
 * advertising instance and, when possible, queue the HCI work that makes
 * it live. Replies immediately when no HCI traffic is needed (not
 * powered, HCI_ADVERTISING set, or nothing to schedule); otherwise the
 * reply is sent from add_advertising_complete().
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *adv, *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Instance numbers are 1-based and bounded by controller support */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The variable-length tail must match the advertised lengths */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs a running clock, i.e. a powered controller */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* adv data and scan rsp data are packed back to back in cp->data */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	adv = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval, 0);
	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Note: the pending cmd's copy of cp keeps the requested instance;
	 * this rewrite only affects what the sync worker schedules.
	 */
	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8742 
/* hci_cmd_sync completion for ADD_EXT_ADV_PARAMS: report the selected TX
 * power and remaining data space on success, or remove the half-created
 * instance on failure.
 *
 * NOTE(review): if the instance has already disappeared, no response is
 * sent to userspace at all — presumably the instance removal path has
 * replied already; confirm against hci_remove_adv_instance() callers.
 */
static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}
8792 
8793 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8794 {
8795 	struct mgmt_pending_cmd *cmd = data;
8796 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8797 
8798 	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8799 }
8800 
/* Handle MGMT_OP_ADD_EXT_ADV_PARAMS: first half of the two-step extended
 * advertising registration. Creates an instance with parameters only (no
 * data); the data arrives later via MGMT_OP_ADD_EXT_ADV_DATA. Replies
 * asynchronously when the controller supports extended advertising,
 * synchronously otherwise.
 */
static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	/* Instance numbers are 1-based and bounded by controller support */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined. Any
	 * extra parameters we don't know about will be ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
				   timeout, duration, tx_power, min_interval,
				   max_interval, 0);

	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			/* Roll back the instance created above */
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		/* Legacy advertising: no HCI traffic yet, reply right away
		 * with the defaults the controller will end up using.
		 */
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8916 
8917 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8918 {
8919 	struct mgmt_pending_cmd *cmd = data;
8920 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8921 	struct mgmt_rp_add_advertising rp;
8922 
8923 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8924 
8925 	memset(&rp, 0, sizeof(rp));
8926 
8927 	rp.instance = cp->instance;
8928 
8929 	if (err)
8930 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8931 				mgmt_status(err));
8932 	else
8933 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8934 				  mgmt_status(err), &rp, sizeof(rp));
8935 
8936 	mgmt_pending_free(cmd);
8937 }
8938 
8939 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8940 {
8941 	struct mgmt_pending_cmd *cmd = data;
8942 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8943 	int err;
8944 
8945 	if (ext_adv_capable(hdev)) {
8946 		err = hci_update_adv_data_sync(hdev, cp->instance);
8947 		if (err)
8948 			return err;
8949 
8950 		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8951 		if (err)
8952 			return err;
8953 
8954 		return hci_enable_ext_advertising_sync(hdev, cp->instance);
8955 	}
8956 
8957 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8958 }
8959 
/* Handle MGMT_OP_ADD_EXT_ADV_DATA: second half of the two-step extended
 * advertising registration. Attaches adv and scan response data to an
 * instance previously created by add_ext_adv_params() and schedules it.
 * On any validation failure after the instance has been looked up, the
 * instance is removed again (clear_new_instance).
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9078 
9079 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9080 					int err)
9081 {
9082 	struct mgmt_pending_cmd *cmd = data;
9083 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9084 	struct mgmt_rp_remove_advertising rp;
9085 
9086 	bt_dev_dbg(hdev, "err %d", err);
9087 
9088 	memset(&rp, 0, sizeof(rp));
9089 	rp.instance = cp->instance;
9090 
9091 	if (err)
9092 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9093 				mgmt_status(err));
9094 	else
9095 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9096 				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9097 
9098 	mgmt_pending_free(cmd);
9099 }
9100 
9101 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9102 {
9103 	struct mgmt_pending_cmd *cmd = data;
9104 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9105 	int err;
9106 
9107 	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9108 	if (err)
9109 		return err;
9110 
9111 	if (list_empty(&hdev->adv_instances))
9112 		err = hci_disable_advertising_sync(hdev);
9113 
9114 	return err;
9115 }
9116 
/* Handle MGMT_OP_REMOVE_ADVERTISING: remove one advertising instance
 * (or all of them when cp->instance is 0) via the cmd_sync machinery.
 * The reply is sent from remove_advertising_complete().
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* A non-zero instance must exist; instance 0 means "all" */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* A pending SET_LE would race with removal */
	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing registered, nothing to remove */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9164 
9165 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9166 			     void *data, u16 data_len)
9167 {
9168 	struct mgmt_cp_get_adv_size_info *cp = data;
9169 	struct mgmt_rp_get_adv_size_info rp;
9170 	u32 flags, supported_flags;
9171 
9172 	bt_dev_dbg(hdev, "sock %p", sk);
9173 
9174 	if (!lmp_le_capable(hdev))
9175 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9176 				       MGMT_STATUS_REJECTED);
9177 
9178 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9179 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9180 				       MGMT_STATUS_INVALID_PARAMS);
9181 
9182 	flags = __le32_to_cpu(cp->flags);
9183 
9184 	/* The current implementation only supports a subset of the specified
9185 	 * flags.
9186 	 */
9187 	supported_flags = get_supported_adv_flags(hdev);
9188 	if (flags & ~supported_flags)
9189 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9190 				       MGMT_STATUS_INVALID_PARAMS);
9191 
9192 	rp.instance = cp->instance;
9193 	rp.flags = cp->flags;
9194 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9195 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9196 
9197 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9198 				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9199 }
9200 
/* MGMT opcode dispatch table: the array index is the opcode. Each entry
 * gives the handler, the expected (or minimum, with HCI_MGMT_VAR_LEN)
 * parameter size, and optional HCI_MGMT_* flags controlling hdev
 * requirements, trust level and unconfigured-controller access.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send,               MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
	{ mgmt_hci_cmd_sync,       MGMT_HCI_CMD_SYNC_SIZE, HCI_MGMT_VAR_LEN },
};
9335 
9336 void mgmt_index_added(struct hci_dev *hdev)
9337 {
9338 	struct mgmt_ev_ext_index ev;
9339 
9340 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9341 		return;
9342 
9343 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9344 		mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
9345 				 HCI_MGMT_UNCONF_INDEX_EVENTS);
9346 		ev.type = 0x01;
9347 	} else {
9348 		mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9349 				 HCI_MGMT_INDEX_EVENTS);
9350 		ev.type = 0x00;
9351 	}
9352 
9353 	ev.bus = hdev->bus;
9354 
9355 	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9356 			 HCI_MGMT_EXT_INDEX_EVENTS);
9357 }
9358 
/* Announce removal of a controller index: fail all pending commands with
 * INVALID_INDEX, emit the legacy and extended removal events, and cancel
 * any remaining delayed work for the device.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	struct cmd_lookup match = { NULL, hdev, MGMT_STATUS_INVALID_INDEX };

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	/* Opcode 0 matches every pending command */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_UNCONF_INDEX_EVENTS);
		ev.type = 0x01;
	} else {
		mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_INDEX_EVENTS);
		ev.type = 0x00;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);

	/* Cancel any remaining timed work */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->service_cache);
	cancel_delayed_work_sync(&hdev->rpa_expired);
}
9391 
/* Power-on completion: on success restore LE auto-connect actions and
 * passive scanning, then answer all pending SET_POWERED commands and
 * broadcast the new settings.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* match.sk (set by settings_rsp) is skipped for the broadcast since
	 * that socket already got a direct response
	 */
	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
9414 
/* Power-off handling (caller holds the needed locks): answer pending
 * SET_POWERED commands, fail all other pending commands with a status
 * matching the reason, report a zeroed class of device if one was set,
 * and broadcast the new settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		match.mgmt_status = MGMT_STATUS_INVALID_INDEX;
	else
		match.mgmt_status = MGMT_STATUS_NOT_POWERED;

	/* Opcode 0 matches every remaining pending command */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9448 
9449 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9450 {
9451 	struct mgmt_pending_cmd *cmd;
9452 	u8 status;
9453 
9454 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9455 	if (!cmd)
9456 		return;
9457 
9458 	if (err == -ERFKILL)
9459 		status = MGMT_STATUS_RFKILLED;
9460 	else
9461 		status = MGMT_STATUS_FAILED;
9462 
9463 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9464 
9465 	mgmt_pending_remove(cmd);
9466 }
9467 
9468 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
9469 		       bool persistent)
9470 {
9471 	struct mgmt_ev_new_link_key ev;
9472 
9473 	memset(&ev, 0, sizeof(ev));
9474 
9475 	ev.store_hint = persistent;
9476 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9477 	ev.key.addr.type = BDADDR_BREDR;
9478 	ev.key.type = key->type;
9479 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
9480 	ev.key.pin_len = key->pin_len;
9481 
9482 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
9483 }
9484 
9485 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9486 {
9487 	switch (ltk->type) {
9488 	case SMP_LTK:
9489 	case SMP_LTK_RESPONDER:
9490 		if (ltk->authenticated)
9491 			return MGMT_LTK_AUTHENTICATED;
9492 		return MGMT_LTK_UNAUTHENTICATED;
9493 	case SMP_LTK_P256:
9494 		if (ltk->authenticated)
9495 			return MGMT_LTK_P256_AUTH;
9496 		return MGMT_LTK_P256_UNAUTH;
9497 	case SMP_LTK_P256_DEBUG:
9498 		return MGMT_LTK_P256_DEBUG;
9499 	}
9500 
9501 	return MGMT_LTK_UNAUTHENTICATED;
9502 }
9503 
/* Announce a new Long Term Key so userspace can decide whether to
 * persist it (store_hint).
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK (as opposed to SMP_LTK_RESPONDER) marks the initiator key */
	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
9546 
9547 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9548 {
9549 	struct mgmt_ev_new_irk ev;
9550 
9551 	memset(&ev, 0, sizeof(ev));
9552 
9553 	ev.store_hint = persistent;
9554 
9555 	bacpy(&ev.rpa, &irk->rpa);
9556 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9557 	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
9558 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9559 
9560 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9561 }
9562 
9563 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9564 		   bool persistent)
9565 {
9566 	struct mgmt_ev_new_csrk ev;
9567 
9568 	memset(&ev, 0, sizeof(ev));
9569 
9570 	/* Devices using resolvable or non-resolvable random addresses
9571 	 * without providing an identity resolving key don't require
9572 	 * to store signature resolving keys. Their addresses will change
9573 	 * the next time around.
9574 	 *
9575 	 * Only when a remote device provides an identity address
9576 	 * make sure the signature resolving key is stored. So allow
9577 	 * static random and public addresses here.
9578 	 */
9579 	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9580 	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9581 		ev.store_hint = 0x00;
9582 	else
9583 		ev.store_hint = persistent;
9584 
9585 	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9586 	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
9587 	ev.key.type = csrk->type;
9588 	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9589 
9590 	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
9591 }
9592 
/* Emit New Connection Parameter so userspace can persist updated LE
 * connection parameters for a device.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	/* Parameters are only tracked per identity address */
	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
9613 
/* Emit Device Connected for a new link, carrying either the cached LE
 * advertising data or, for BR/EDR, the remote name and class of device
 * as EIR fields.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	/* Only announce each connection once */
	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		return;

	/* allocate buff for LE or BR/EDR adv */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);

		/* Only append a non-zero class of device */
		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class, sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}
9666 
9667 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9668 {
9669 	struct hci_dev *hdev = data;
9670 	struct mgmt_cp_unpair_device *cp = cmd->param;
9671 
9672 	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9673 
9674 	cmd->cmd_complete(cmd, 0);
9675 	mgmt_pending_remove(cmd);
9676 }
9677 
9678 bool mgmt_powering_down(struct hci_dev *hdev)
9679 {
9680 	struct mgmt_pending_cmd *cmd;
9681 	struct mgmt_mode *cp;
9682 
9683 	if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
9684 		return true;
9685 
9686 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9687 	if (!cmd)
9688 		return false;
9689 
9690 	cp = cmd->param;
9691 	if (!cp->val)
9692 		return true;
9693 
9694 	return false;
9695 }
9696 
9697 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
9698 			      u8 link_type, u8 addr_type, u8 reason,
9699 			      bool mgmt_connected)
9700 {
9701 	struct mgmt_ev_device_disconnected ev;
9702 	struct sock *sk = NULL;
9703 
9704 	if (!mgmt_connected)
9705 		return;
9706 
9707 	if (link_type != ACL_LINK && link_type != LE_LINK)
9708 		return;
9709 
9710 	bacpy(&ev.addr.bdaddr, bdaddr);
9711 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9712 	ev.reason = reason;
9713 
9714 	/* Report disconnects due to suspend */
9715 	if (hdev->suspended)
9716 		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
9717 
9718 	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
9719 
9720 	if (sk)
9721 		sock_put(sk);
9722 }
9723 
9724 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9725 			    u8 link_type, u8 addr_type, u8 status)
9726 {
9727 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9728 	struct mgmt_cp_disconnect *cp;
9729 	struct mgmt_pending_cmd *cmd;
9730 
9731 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9732 			     hdev);
9733 
9734 	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9735 	if (!cmd)
9736 		return;
9737 
9738 	cp = cmd->param;
9739 
9740 	if (bacmp(bdaddr, &cp->addr.bdaddr))
9741 		return;
9742 
9743 	if (cp->addr.type != bdaddr_type)
9744 		return;
9745 
9746 	cmd->cmd_complete(cmd, mgmt_status(status));
9747 	mgmt_pending_remove(cmd);
9748 }
9749 
/* Report a failed connection attempt. If the connection had already been
 * announced via Device Connected, emit Device Disconnected instead so
 * userspace state stays consistent.
 */
void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* NOTE(review): the HCI 'status' is forwarded below as the mgmt
	 * disconnect 'reason' — the two are distinct value namespaces;
	 * confirm this mapping is intended.
	 */
	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		mgmt_device_disconnected(hdev, &conn->dst, conn->type,
					 conn->dst_type, status, true);
		return;
	}

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
9766 
9767 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9768 {
9769 	struct mgmt_ev_pin_code_request ev;
9770 
9771 	bacpy(&ev.addr.bdaddr, bdaddr);
9772 	ev.addr.type = BDADDR_BREDR;
9773 	ev.secure = secure;
9774 
9775 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9776 }
9777 
9778 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9779 				  u8 status)
9780 {
9781 	struct mgmt_pending_cmd *cmd;
9782 
9783 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9784 	if (!cmd)
9785 		return;
9786 
9787 	cmd->cmd_complete(cmd, mgmt_status(status));
9788 	mgmt_pending_remove(cmd);
9789 }
9790 
9791 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9792 				      u8 status)
9793 {
9794 	struct mgmt_pending_cmd *cmd;
9795 
9796 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9797 	if (!cmd)
9798 		return;
9799 
9800 	cmd->cmd_complete(cmd, mgmt_status(status));
9801 	mgmt_pending_remove(cmd);
9802 }
9803 
9804 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9805 			      u8 link_type, u8 addr_type, u32 value,
9806 			      u8 confirm_hint)
9807 {
9808 	struct mgmt_ev_user_confirm_request ev;
9809 
9810 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9811 
9812 	bacpy(&ev.addr.bdaddr, bdaddr);
9813 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9814 	ev.confirm_hint = confirm_hint;
9815 	ev.value = cpu_to_le32(value);
9816 
9817 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9818 			  NULL);
9819 }
9820 
9821 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9822 			      u8 link_type, u8 addr_type)
9823 {
9824 	struct mgmt_ev_user_passkey_request ev;
9825 
9826 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9827 
9828 	bacpy(&ev.addr.bdaddr, bdaddr);
9829 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9830 
9831 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9832 			  NULL);
9833 }
9834 
9835 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9836 				      u8 link_type, u8 addr_type, u8 status,
9837 				      u8 opcode)
9838 {
9839 	struct mgmt_pending_cmd *cmd;
9840 
9841 	cmd = pending_find(opcode, hdev);
9842 	if (!cmd)
9843 		return -ENOENT;
9844 
9845 	cmd->cmd_complete(cmd, mgmt_status(status));
9846 	mgmt_pending_remove(cmd);
9847 
9848 	return 0;
9849 }
9850 
/* Complete a pending User Confirm Reply command */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
9857 
/* Complete a pending User Confirm Negative Reply command */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
9865 
/* Complete a pending User Passkey Reply command */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
9872 
/* Complete a pending User Passkey Negative Reply command */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
9880 
9881 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9882 			     u8 link_type, u8 addr_type, u32 passkey,
9883 			     u8 entered)
9884 {
9885 	struct mgmt_ev_passkey_notify ev;
9886 
9887 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9888 
9889 	bacpy(&ev.addr.bdaddr, bdaddr);
9890 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9891 	ev.passkey = __cpu_to_le32(passkey);
9892 	ev.entered = entered;
9893 
9894 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9895 }
9896 
/* Report an authentication failure and complete any pending pairing
 * command for this connection.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* Skip the event for the socket that initiated pairing; it gets
	 * the command completion below instead.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
9917 
/* Handle completion of an HCI authentication-enable change triggered by
 * Set Link Security: mirror the controller state into the mgmt flag and
 * complete pending commands.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		/* Fail all pending Set Link Security commands */
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's HCI_AUTH state into the mgmt setting */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9944 
9945 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9946 {
9947 	struct cmd_lookup *match = data;
9948 
9949 	if (match->sk == NULL) {
9950 		match->sk = cmd->sk;
9951 		sock_hold(match->sk);
9952 	}
9953 }
9954 
9955 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
9956 				    u8 status)
9957 {
9958 	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
9959 
9960 	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
9961 	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
9962 	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
9963 
9964 	if (!status) {
9965 		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
9966 				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9967 		ext_info_changed(hdev, NULL);
9968 	}
9969 
9970 	if (match.sk)
9971 		sock_put(match.sk);
9972 }
9973 
/* Propagate a completed local-name change to mgmt listeners. */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* Name change was not mgmt-initiated; cache it directly */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 *
		 * NOTE(review): the flag tested below is HCI_POWERING_DOWN
		 * while the comment above says "powering on" — confirm
		 * which is intended.
		 */
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
			return;

		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
10004 
10005 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
10006 {
10007 	int i;
10008 
10009 	for (i = 0; i < uuid_count; i++) {
10010 		if (!memcmp(uuid, uuids[i], 16))
10011 			return true;
10012 	}
10013 
10014 	return false;
10015 }
10016 
/* Walk EIR/advertising data and return true if any advertised 16-, 32-
 * or 128-bit service UUID matches an entry in 'uuids'. Shorter UUIDs
 * are expanded to 128 bits using the Bluetooth base UUID first.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];	/* length of AD type + data */
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		/* Stop on a truncated field */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {	/* AD type */
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* UUID data bytes start at eir[2], little-endian */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* 128-bit UUIDs are compared as stored */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length byte + contents) */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
10071 
/* Apply the service-discovery filter (RSSI threshold and UUID list) to
 * a discovery result; returns false when the result must be dropped.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
10114 
10115 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
10116 				  bdaddr_t *bdaddr, u8 addr_type)
10117 {
10118 	struct mgmt_ev_adv_monitor_device_lost ev;
10119 
10120 	ev.monitor_handle = cpu_to_le16(handle);
10121 	bacpy(&ev.addr.bdaddr, bdaddr);
10122 	ev.addr.type = addr_type;
10123 
10124 	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
10125 		   NULL);
10126 }
10127 
/* Clone a DEVICE_FOUND event skb into an ADV_MONITOR_DEVICE_FOUND event
 * prefixed with the matched monitor handle. The input skb is only read;
 * the caller keeps ownership of it.
 */
static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	/* New event size = old payload + the extra monitor_handle field */
	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}
10157 
/* Route a DEVICE_FOUND skb to the regular event, to matched
 * advertisement monitors, or both. Consumes 'skb': it is either sent
 * via mgmt_event_skb() or freed before returning.
 */
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	/* Recomputed below while walking the monitored-device list */
	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			/* Notify each monitored device at most once */
			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	/* Either forward the original skb or release it */
	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}
10221 
/* Forward an advertising report to mesh listeners, but only when the
 * AD or scan-response data contains at least one of the configured mesh
 * AD types. An empty filter (first entry zero) accepts everything.
 */
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			/* A zero entry terminates the configured type list */
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	/* No requested AD type present: drop the report */
	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}
10287 
/* Build and dispatch a Device Found event for a discovery result, after
 * applying mesh forwarding, discovery filters and limited-discovery
 * checks. The skb is handed off to mgmt_adv_monitor_device_found().
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	/* Mesh mode gets its own copy of every LE report */
	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	/* Append the class of device as an EIR field unless already present */
	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}
10379 
10380 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10381 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
10382 {
10383 	struct sk_buff *skb;
10384 	struct mgmt_ev_device_found *ev;
10385 	u16 eir_len = 0;
10386 	u32 flags = 0;
10387 
10388 	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10389 			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
10390 	if (!skb)
10391 		return;
10392 
10393 	ev = skb_put(skb, sizeof(*ev));
10394 	bacpy(&ev->addr.bdaddr, bdaddr);
10395 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
10396 	ev->rssi = rssi;
10397 
10398 	if (name)
10399 		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
10400 	else
10401 		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
10402 
10403 	ev->eir_len = cpu_to_le16(eir_len);
10404 	ev->flags = cpu_to_le32(flags);
10405 
10406 	mgmt_event_skb(skb, NULL);
10407 }
10408 
10409 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
10410 {
10411 	struct mgmt_ev_discovering ev;
10412 
10413 	bt_dev_dbg(hdev, "discovering %u", discovering);
10414 
10415 	memset(&ev, 0, sizeof(ev));
10416 	ev.type = hdev->discovery.type;
10417 	ev.discovering = discovering;
10418 
10419 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
10420 }
10421 
10422 void mgmt_suspending(struct hci_dev *hdev, u8 state)
10423 {
10424 	struct mgmt_ev_controller_suspend ev;
10425 
10426 	ev.suspend_state = state;
10427 	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
10428 }
10429 
10430 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
10431 		   u8 addr_type)
10432 {
10433 	struct mgmt_ev_controller_resume ev;
10434 
10435 	ev.wake_reason = reason;
10436 	if (bdaddr) {
10437 		bacpy(&ev.addr.bdaddr, bdaddr);
10438 		ev.addr.type = addr_type;
10439 	} else {
10440 		memset(&ev.addr, 0, sizeof(ev.addr));
10441 	}
10442 
10443 	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
10444 }
10445 
/* mgmt is exposed as the control channel of the HCI socket layer */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
10452 
/* Register the mgmt control channel with the HCI socket layer */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
10457 
/* Unregister the mgmt control channel from the HCI socket layer */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
10462 
10463 void mgmt_cleanup(struct sock *sk)
10464 {
10465 	struct mgmt_mesh_tx *mesh_tx;
10466 	struct hci_dev *hdev;
10467 
10468 	read_lock(&hci_dev_list_lock);
10469 
10470 	list_for_each_entry(hdev, &hci_dev_list, list) {
10471 		do {
10472 			mesh_tx = mgmt_mesh_next(hdev, sk);
10473 
10474 			if (mesh_tx)
10475 				mesh_send_complete(hdev, mesh_tx, true);
10476 		} while (mesh_tx);
10477 	}
10478 
10479 	read_unlock(&hci_dev_list_lock);
10480 }
10481