xref: /linux/net/bluetooth/mgmt.c (revision 60684c2bd35064043360e6f716d1b7c20e967b7d)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 #include "eir.h"
42 #include "aosp.h"
43 
44 #define MGMT_VERSION	1
45 #define MGMT_REVISION	22
46 
/* All mgmt opcodes available to trusted sockets; this is the command
 * list reported by MGMT_OP_READ_COMMANDS when the requesting socket
 * has the HCI_SOCK_TRUSTED flag set (see read_commands()).
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
};
137 
/* All mgmt events that trusted sockets may receive; reported by
 * MGMT_OP_READ_COMMANDS for trusted sockets (see read_commands()).
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};
184 
/* Read-only subset of mgmt opcodes available to untrusted sockets;
 * reported by MGMT_OP_READ_COMMANDS when HCI_SOCK_TRUSTED is not set.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
197 
/* Subset of mgmt events delivered to untrusted sockets; reported by
 * MGMT_OP_READ_COMMANDS when HCI_SOCK_TRUSTED is not set.
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
212 
213 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
214 
215 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
216 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
217 
/* HCI to MGMT error code conversion table, indexed directly by the
 * HCI status code (see mgmt_status()). Codes beyond the table fall
 * back to MGMT_STATUS_FAILED.
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
285 
286 static u8 mgmt_errno_status(int err)
287 {
288 	switch (err) {
289 	case 0:
290 		return MGMT_STATUS_SUCCESS;
291 	case -EPERM:
292 		return MGMT_STATUS_REJECTED;
293 	case -EINVAL:
294 		return MGMT_STATUS_INVALID_PARAMS;
295 	case -EOPNOTSUPP:
296 		return MGMT_STATUS_NOT_SUPPORTED;
297 	case -EBUSY:
298 		return MGMT_STATUS_BUSY;
299 	case -ETIMEDOUT:
300 		return MGMT_STATUS_AUTH_FAILED;
301 	case -ENOMEM:
302 		return MGMT_STATUS_NO_RESOURCES;
303 	case -EISCONN:
304 		return MGMT_STATUS_ALREADY_CONNECTED;
305 	case -ENOTCONN:
306 		return MGMT_STATUS_DISCONNECTED;
307 	}
308 
309 	return MGMT_STATUS_FAILED;
310 }
311 
312 static u8 mgmt_status(int err)
313 {
314 	if (err < 0)
315 		return mgmt_errno_status(err);
316 
317 	if (err < ARRAY_SIZE(mgmt_status_table))
318 		return mgmt_status_table[err];
319 
320 	return MGMT_STATUS_FAILED;
321 }
322 
/* Send an index-related mgmt event on the control channel to all
 * sockets matching the given HCI socket flag, skipping none.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
329 
/* Send a mgmt event on the control channel restricted to sockets with
 * the given flag set, optionally skipping one socket (e.g. the sender).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
336 
/* Send a mgmt event on the control channel to all trusted sockets,
 * optionally skipping one socket (e.g. the command originator).
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
343 
/* Send a pre-built mgmt event skb on the control channel to all
 * trusted sockets, optionally skipping one socket.
 */
static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}
349 
350 static u8 le_addr_type(u8 mgmt_addr_type)
351 {
352 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
353 		return ADDR_LE_DEV_PUBLIC;
354 	else
355 		return ADDR_LE_DEV_RANDOM;
356 }
357 
/* Fill a mgmt_rp_read_version structure with the mgmt interface
 * version and revision (revision in little-endian wire format).
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
365 
/* Handle MGMT_OP_READ_VERSION: reply with the mgmt interface version
 * and revision. No controller index is involved (MGMT_INDEX_NONE).
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
378 
379 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
380 			 u16 data_len)
381 {
382 	struct mgmt_rp_read_commands *rp;
383 	u16 num_commands, num_events;
384 	size_t rp_size;
385 	int i, err;
386 
387 	bt_dev_dbg(hdev, "sock %p", sk);
388 
389 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
390 		num_commands = ARRAY_SIZE(mgmt_commands);
391 		num_events = ARRAY_SIZE(mgmt_events);
392 	} else {
393 		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
394 		num_events = ARRAY_SIZE(mgmt_untrusted_events);
395 	}
396 
397 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
398 
399 	rp = kmalloc(rp_size, GFP_KERNEL);
400 	if (!rp)
401 		return -ENOMEM;
402 
403 	rp->num_commands = cpu_to_le16(num_commands);
404 	rp->num_events = cpu_to_le16(num_events);
405 
406 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
407 		__le16 *opcode = rp->opcodes;
408 
409 		for (i = 0; i < num_commands; i++, opcode++)
410 			put_unaligned_le16(mgmt_commands[i], opcode);
411 
412 		for (i = 0; i < num_events; i++, opcode++)
413 			put_unaligned_le16(mgmt_events[i], opcode);
414 	} else {
415 		__le16 *opcode = rp->opcodes;
416 
417 		for (i = 0; i < num_commands; i++, opcode++)
418 			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
419 
420 		for (i = 0; i < num_events; i++, opcode++)
421 			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
422 	}
423 
424 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
425 				rp, rp_size);
426 	kfree(rp);
427 
428 	return err;
429 }
430 
/* Handle MGMT_OP_READ_INDEX_LIST: reply with the ids of all configured
 * primary controllers visible to the management interface.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: an upper bound on the number of entries, used only
	 * to size the reply allocation.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in indexes, applying extra filters, so the
	 * final count can be lower than the first-pass estimate.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Shrink the reply length to the number actually added. */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
490 
/* Handle MGMT_OP_READ_UNCONF_INDEX_LIST: reply with the ids of all
 * primary controllers that are still unconfigured.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: an upper bound on the number of entries, used only
	 * to size the reply allocation.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in indexes, applying extra filters, so the
	 * final count can be lower than the first-pass estimate.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Shrink the reply length to the number actually added. */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
550 
/* Handle MGMT_OP_READ_EXT_INDEX_LIST: reply with all controllers
 * (configured, unconfigured and AMP) including their type and bus.
 * Calling this switches the socket to extended index events only.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: an upper bound on the number of entries, used only
	 * to size the reply allocation.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock. */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in entries, applying extra filters, so the
	 * final count can be lower than the first-pass estimate.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		/* Entry type: 0x00 configured primary, 0x01 unconfigured
		 * primary, 0x02 AMP controller.
		 */
		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
624 
625 static bool is_configured(struct hci_dev *hdev)
626 {
627 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
628 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
629 		return false;
630 
631 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
632 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
633 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
634 		return false;
635 
636 	return true;
637 }
638 
/* Build the bitmask of configuration options that quirks demand but
 * that have not been completed yet, in little-endian wire format.
 * Mirrors the checks in is_configured().
 */
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Public address is required but still all-zero. */
	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
654 
/* Notify sockets that opted into option events about the current set
 * of missing configuration options, skipping the originator.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
662 
/* Complete a configuration command by replying with the current set
 * of missing configuration options.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
670 
/* Handle MGMT_OP_READ_CONFIG_INFO: reply with the manufacturer id and
 * the supported/missing configuration option bitmasks.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Changing the public address is only supported when the driver
	 * provides a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
698 
/* Build the bitmask of PHYs the controller hardware supports, derived
 * from its BR/EDR LMP features and LE feature bits.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate 1-slot packets are mandatory for BR/EDR. */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		/* EDR rates are nested: 3M capability is only reported
		 * when the controller is 2M capable.
		 */
		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M is mandatory for any LE capable controller. */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
750 
/* Build the bitmask of PHYs currently selected: for BR/EDR this is
 * derived from hdev->pkt_type (note the EDR bits are "do not use"
 * flags, hence the inverted tests), for LE from the default TX/RX
 * PHY preferences.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		/* For EDR, a cleared HCI_2DHx/HCI_3DHx bit in pkt_type
		 * means the packet type is enabled.
		 */
		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
813 
814 static u32 get_configurable_phys(struct hci_dev *hdev)
815 {
816 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
817 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
818 }
819 
/* Build the bitmask of settings this controller can support, based on
 * its BR/EDR, LE and ISO capabilities and configuration quirks.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* These settings are available on every controller. */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable requires at least Bluetooth 1.2. */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			/* High Speed additionally needs kernel support. */
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	/* Configuration is supported if the controller either requires
	 * external config or its driver can change the public address.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
872 
/* Build the bitmask of currently active settings from the hdev flags
 * and related state.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	return settings;
}
949 
/* Look up a pending mgmt command for this controller on the control
 * channel; returns NULL when none is pending for the opcode.
 */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
954 
955 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
956 {
957 	struct mgmt_pending_cmd *cmd;
958 
959 	/* If there's a pending mgmt command the flags will not yet have
960 	 * their final values, so check for this first.
961 	 */
962 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
963 	if (cmd) {
964 		struct mgmt_mode *cp = cmd->param;
965 		if (cp->val == 0x01)
966 			return LE_AD_GENERAL;
967 		else if (cp->val == 0x02)
968 			return LE_AD_LIMITED;
969 	} else {
970 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
971 			return LE_AD_LIMITED;
972 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
973 			return LE_AD_GENERAL;
974 	}
975 
976 	return 0;
977 }
978 
979 bool mgmt_get_connectable(struct hci_dev *hdev)
980 {
981 	struct mgmt_pending_cmd *cmd;
982 
983 	/* If there's a pending mgmt command the flag will not yet have
984 	 * it's final value, so check for this first.
985 	 */
986 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
987 	if (cmd) {
988 		struct mgmt_mode *cp = cmd->param;
989 
990 		return cp->val;
991 	}
992 
993 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
994 }
995 
/* hci_cmd_sync callback: refresh the EIR data and device class after
 * the service cache has been switched off.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
1003 
/* Delayed-work handler for hdev->service_cache: once the cache timeout
 * expires, clear the flag and queue the EIR/class refresh.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	/* Nothing to do if the cache flag was already cleared. */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}
1014 
1015 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1016 {
1017 	/* The generation of a new RPA and programming it into the
1018 	 * controller happens in the hci_req_enable_advertising()
1019 	 * function.
1020 	 */
1021 	if (ext_adv_capable(hdev))
1022 		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1023 	else
1024 		return hci_enable_advertising_sync(hdev);
1025 }
1026 
/* Delayed work: mark the current RPA as expired and, if advertising
 * is active, queue the sync work that re-enables advertising (which
 * is where a fresh RPA gets programmed).
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Nothing to refresh unless advertising is enabled */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
1041 
/* Delayed work: the discoverable timeout expired. Clear both
 * discoverable flags, reset the timeout, push the change to the
 * controller and notify userspace of the new settings.
 */
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_update_discoverable(hdev);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}
1066 
1067 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1068 
1069 static void mesh_send_complete(struct hci_dev *hdev,
1070 			       struct mgmt_mesh_tx *mesh_tx, bool silent)
1071 {
1072 	u8 handle = mesh_tx->handle;
1073 
1074 	if (!silent)
1075 		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
1076 			   sizeof(handle), NULL);
1077 
1078 	mgmt_mesh_remove(mesh_tx);
1079 }
1080 
/* Sync work: the mesh sending window is over. Clear the sending
 * flag, stop advertising and complete the first outstanding mesh TX
 * entry, if there is one.
 */
static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}
1094 
1095 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1096 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
/* Completion callback for mesh_send_done_sync(): start the next
 * queued mesh TX, if any. If queueing fails the entry is completed
 * (and removed) right away; otherwise the sending flag is set.
 */
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	/* err from the previous work is deliberately overwritten here */
	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}
1112 
/* Delayed work: the mesh send timer fired; queue the sync teardown
 * (mesh_send_done_sync) unless sending was already stopped.
 */
static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}
1123 
/* One-time mgmt setup for a controller: initialize the delayed work
 * items used by this file and switch the device into mgmt-controlled
 * mode. Idempotent via the HCI_MGMT flag.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}
1145 
/* Handle MGMT_OP_READ_INFO: reply with the controller's address, HCI
 * version, manufacturer, supported/current settings, class of device
 * and both names. The snapshot is taken under hdev->lock.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1175 
/* Build the EIR blob used by the extended controller info paths:
 * class of device (BR/EDR only), appearance (LE only), then the
 * complete and short device names. Returns the number of bytes
 * written into @eir.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1199 
/* Handle MGMT_OP_READ_EXT_INFO: like Read Info but with the variable
 * parts (class, appearance, names) packed as EIR data. Calling this
 * also switches the socket over to extended info events only.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1239 
/* Broadcast MGMT_EV_EXT_INFO_CHANGED with a freshly built EIR blob to
 * all sockets that enabled extended info events, except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1255 
1256 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1257 {
1258 	__le32 settings = cpu_to_le32(get_current_settings(hdev));
1259 
1260 	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1261 				 sizeof(settings));
1262 }
1263 
1264 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1265 {
1266 	struct mgmt_ev_advertising_added ev;
1267 
1268 	ev.instance = instance;
1269 
1270 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1271 }
1272 
1273 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1274 			      u8 instance)
1275 {
1276 	struct mgmt_ev_advertising_removed ev;
1277 
1278 	ev.instance = instance;
1279 
1280 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1281 }
1282 
1283 static void cancel_adv_timeout(struct hci_dev *hdev)
1284 {
1285 	if (hdev->adv_instance_timeout) {
1286 		hdev->adv_instance_timeout = 0;
1287 		cancel_delayed_work(&hdev->adv_instance_expire);
1288 	}
1289 }
1290 
/* This function requires the caller holds hdev->lock */
/* Re-queue every LE connection parameter entry onto the pending
 * connect/report lists according to its auto_connect policy. Needed
 * after (possibly fake) power off so passive scanning can resume.
 */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		list_del_init(&p->action);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_add(&p->action, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			list_add(&p->action, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}
1315 
1316 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1317 {
1318 	__le32 ev = cpu_to_le32(get_current_settings(hdev));
1319 
1320 	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1321 				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1322 }
1323 
/* Completion of the Set Powered sync work: respond to the pending
 * command and, on power on, restore LE actions and broadcast the new
 * settings.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}
1359 
1360 static int set_powered_sync(struct hci_dev *hdev, void *data)
1361 {
1362 	struct mgmt_pending_cmd *cmd = data;
1363 	struct mgmt_mode *cp = cmd->param;
1364 
1365 	BT_DBG("%s", hdev->name);
1366 
1367 	return hci_set_powered_sync(hdev, cp->val);
1368 }
1369 
/* Handle MGMT_OP_SET_POWERED: validate the parameter, reject when a
 * power change is already in flight, short-circuit when the state is
 * already as requested, otherwise queue the sync work with a pending
 * command tracking the request.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Requested state already active: just echo the settings back */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
				 mgmt_set_powered_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1412 
/* Broadcast the current settings to all mgmt sockets (no skip). */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1417 
/* Accumulator passed to mgmt_pending_foreach() response helpers.
 * @sk holds a reference to the first socket that was responded to so
 * callers can skip it when broadcasting the follow-up New Settings
 * event (see settings_rsp()).
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1423 
/* mgmt_pending_foreach() helper: respond to a pending command with
 * the current settings and free it. The first responder's socket is
 * stashed (with an extra reference) in the cmd_lookup.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1439 
/* mgmt_pending_foreach() helper: fail a pending command with the
 * status pointed to by @data and remove it.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1447 
/* mgmt_pending_foreach() helper: prefer the command's own
 * cmd_complete handler when one is set; otherwise fall back to a
 * plain status response via cmd_status_rsp().
 */
static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	if (cmd->cmd_complete) {
		u8 *status = data;

		cmd->cmd_complete(cmd, *status);
		mgmt_pending_remove(cmd);

		return;
	}

	cmd_status_rsp(cmd, data);
}
1461 
/* cmd_complete handler that echoes the command's parameters back. */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1467 
/* cmd_complete handler that replies with only the mgmt_addr_info
 * prefix of the command's parameters.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1473 
1474 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1475 {
1476 	if (!lmp_bredr_capable(hdev))
1477 		return MGMT_STATUS_NOT_SUPPORTED;
1478 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1479 		return MGMT_STATUS_REJECTED;
1480 	else
1481 		return MGMT_STATUS_SUCCESS;
1482 }
1483 
1484 static u8 mgmt_le_support(struct hci_dev *hdev)
1485 {
1486 	if (!lmp_le_capable(hdev))
1487 		return MGMT_STATUS_NOT_SUPPORTED;
1488 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1489 		return MGMT_STATUS_REJECTED;
1490 	else
1491 		return MGMT_STATUS_SUCCESS;
1492 }
1493 
/* Completion of the Set Discoverable sync work: on success (re)arm
 * the discoverable timeout and report the new settings; on failure
 * roll back the limited-discoverable flag and report the error.
 */
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	/* Arm the timeout stored by set_discoverable() now that the
	 * new state is active on the controller.
	 */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}
1527 
/* Sync work: push the new discoverable state to the controller. */
static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}
1534 
/* Handle MGMT_OP_SET_DISCOVERABLE: validate the mode/timeout combo,
 * handle the powered-off and mode-unchanged cases directly, and
 * otherwise update the flags and queue the HCI work, tracked by a
 * pending command.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	/* 0x00 = off, 0x01 = general, 0x02 = limited discoverable */
	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled first */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1667 
1668 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1669 					  int err)
1670 {
1671 	struct mgmt_pending_cmd *cmd = data;
1672 
1673 	bt_dev_dbg(hdev, "err %d", err);
1674 
1675 	/* Make sure cmd still outstanding. */
1676 	if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1677 		return;
1678 
1679 	hci_dev_lock(hdev);
1680 
1681 	if (err) {
1682 		u8 mgmt_err = mgmt_status(err);
1683 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1684 		goto done;
1685 	}
1686 
1687 	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1688 	new_settings(hdev, cmd->sk);
1689 
1690 done:
1691 	if (cmd)
1692 		mgmt_pending_remove(cmd);
1693 
1694 	hci_dev_unlock(hdev);
1695 }
1696 
/* Powered-off path of Set Connectable: toggle the flags (clearing
 * discoverable as well when going non-connectable), send the
 * settings response, and refresh scan state if anything changed.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1725 
/* Sync work: push the new connectable state to the controller. */
static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}
1732 
/* Handle MGMT_OP_SET_CONNECTABLE: validate, take the settings-only
 * shortcut when powered off, otherwise update the flags (dropping
 * discoverable state when disabling) and queue the HCI work with a
 * pending command tracking it.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Going non-connectable also ends discoverable mode */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1792 
/* Handle MGMT_OP_SET_BONDABLE: toggle the bondable flag, respond
 * with the settings, and on change refresh discoverable state and
 * broadcast New Settings. No HCI traffic needed for the flag itself.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1830 
/* Handle MGMT_OP_SET_LINK_SECURITY: when powered off just flip the
 * flag; when powered, send HCI Write Auth Enable with a pending
 * command tracking the request. The reply for the queued case is
 * sent once the controller responds (handled outside this block).
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: no-op */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1899 
/* Completion of the Set SSP sync work: on error roll the flags back
 * and fail all pending SET_SSP commands; on success settle the
 * SSP/HS flags, respond to all pending commands, broadcast settings
 * if changed and refresh the EIR data.
 */
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* Undo the tentative enable done in set_ssp_sync() */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);

		/* Disabling SSP also disables HS; "changed" is true if
		 * either flag actually flipped.
		 */
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}
1948 
/* Sync work for Set SSP: tentatively set the flag when enabling so
 * the write uses the new state, then write the mode to the
 * controller and revert the flag if the write failed.
 */
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	/* Roll back the flag this function set if the write failed */
	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
1966 
/* Handle MGMT_OP_SET_SSP: when powered off just adjust the SSP/HS
 * flags; when powered, queue the sync write tracked by a pending
 * command. Requires BR/EDR and SSP capability.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* Disabling SSP also disables HS */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2046 
/* Handle MGMT_OP_SET_HS: toggle High Speed support. Requires
 * CONFIG_BT_HS, BR/EDR, SSP capability and SSP enabled; disabling HS
 * while powered is rejected. Flag-only change, no HCI traffic.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* An SSP change in flight could invalidate the SSP check above */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2107 
/* Completion of the Set LE sync work: fail all pending SET_LE
 * commands on error; otherwise respond to all of them and broadcast
 * the new settings, skipping the first responded socket.
 */
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
							&status);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
2128 
/* hci_cmd_sync work for MGMT_OP_SET_LE: write the LE Host Supported
 * setting and, when LE ends up enabled, refresh the default advertising
 * data and passive scanning. Runs in cmd_sync context, so the *_sync
 * helpers are called directly.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		/* Disabling LE: tear down all advertising first */
		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			/* Only push scan response data once the extended
			 * advertising instance was set up successfully.
			 */
			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
2172 
/* Completion callback for MGMT_OP_SET_MESH_RECEIVER. On failure every
 * pending SET_MESH_RECEIVER command is answered with the error status;
 * on success only this command is removed and completed.
 */
static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	u8 status = mgmt_status(err);
	struct sock *sk = cmd->sk;

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	/* Remove before completing so the response is only sent once */
	mgmt_pending_remove(cmd);
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
}
2188 
2189 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2190 {
2191 	struct mgmt_pending_cmd *cmd = data;
2192 	struct mgmt_cp_set_mesh *cp = cmd->param;
2193 	size_t len = cmd->param_len;
2194 
2195 	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2196 
2197 	if (cp->enable)
2198 		hci_dev_set_flag(hdev, HCI_MESH);
2199 	else
2200 		hci_dev_clear_flag(hdev, HCI_MESH);
2201 
2202 	len -= sizeof(*cp);
2203 
2204 	/* If filters don't fit, forward all adv pkts */
2205 	if (len <= sizeof(hdev->mesh_ad_types))
2206 		memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2207 
2208 	hci_update_passive_scan_sync(hdev);
2209 	return 0;
2210 }
2211 
/* MGMT_OP_SET_MESH_RECEIVER handler: validate the request and queue
 * set_mesh_sync to apply it from the cmd_sync context. The response is
 * sent from set_mesh_complete.
 */
static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_set_mesh *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Mesh is gated on LE support and the experimental feature flag */
	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->enable != 0x00 && cp->enable != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
					 set_mesh_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2249 
2250 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2251 {
2252 	struct mgmt_mesh_tx *mesh_tx = data;
2253 	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2254 	unsigned long mesh_send_interval;
2255 	u8 mgmt_err = mgmt_status(err);
2256 
2257 	/* Report any errors here, but don't report completion */
2258 
2259 	if (mgmt_err) {
2260 		hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2261 		/* Send Complete Error Code for handle */
2262 		mesh_send_complete(hdev, mesh_tx, false);
2263 		return;
2264 	}
2265 
2266 	mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2267 	queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2268 			   mesh_send_interval);
2269 }
2270 
/* hci_cmd_sync work for MGMT_OP_MESH_SEND: program the mesh payload as
 * a short-lived advertising instance and schedule it if nothing else is
 * currently on air.
 */
static int mesh_send_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	struct adv_info *adv, *next_instance;
	/* Mesh uses a dedicated instance number one past the sets the
	 * controller exposes, so it cannot collide with regular ones.
	 */
	u8 instance = hdev->le_num_of_adv_sets + 1;
	u16 timeout, duration;
	int err = 0;

	/* NOTE(review): this path returns a positive MGMT status while
	 * the other exits return 0/negative errno - confirm callers map
	 * both to a failure.
	 */
	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
		return MGMT_STATUS_BUSY;

	timeout = 1000;
	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
	adv = hci_add_adv_instance(hdev, instance, 0,
				   send->adv_data_len, send->adv_data,
				   0, NULL,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval,
				   mesh_tx->handle);

	if (!IS_ERR(adv))
		mesh_tx->instance = instance;
	else
		err = PTR_ERR(adv);

	if (hdev->cur_adv_instance == instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, instance);
		if (next_instance)
			instance = next_instance->instance;
		else
			instance = 0;
	} else if (hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other, or
		 * let it go naturally from queue if ADV is already happening
		 */
		instance = 0;
	}

	if (instance)
		return hci_schedule_adv_instance_sync(hdev, instance, true);

	return err;
}
2324 
2325 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2326 {
2327 	struct mgmt_rp_mesh_read_features *rp = data;
2328 
2329 	if (rp->used_handles >= rp->max_handles)
2330 		return;
2331 
2332 	rp->handles[rp->used_handles++] = mesh_tx->handle;
2333 }
2334 
/* MGMT_OP_MESH_READ_FEATURES handler: report the maximum number of mesh
 * TX handles and the handles currently in use by the requesting socket.
 */
static int mesh_features(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_rp_mesh_read_features rp;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	memset(&rp, 0, sizeof(rp));
	rp.index = cpu_to_le16(hdev->id);
	/* Handles are only available while LE is enabled */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		rp.max_handles = MESH_HANDLES_MAX;

	hci_dev_lock(hdev);

	if (rp.max_handles)
		mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	/* Response length: fixed header plus one byte per used handle
	 * (rp.handles is dimensioned for MESH_HANDLES_MAX u8 entries).
	 */
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);

	hci_dev_unlock(hdev);
	return 0;
}
2361 
2362 static int send_cancel(struct hci_dev *hdev, void *data)
2363 {
2364 	struct mgmt_pending_cmd *cmd = data;
2365 	struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2366 	struct mgmt_mesh_tx *mesh_tx;
2367 
2368 	if (!cancel->handle) {
2369 		do {
2370 			mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2371 
2372 			if (mesh_tx)
2373 				mesh_send_complete(hdev, mesh_tx, false);
2374 		} while (mesh_tx);
2375 	} else {
2376 		mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2377 
2378 		if (mesh_tx && mesh_tx->sk == cmd->sk)
2379 			mesh_send_complete(hdev, mesh_tx, false);
2380 	}
2381 
2382 	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2383 			  0, NULL, 0);
2384 	mgmt_pending_free(cmd);
2385 
2386 	return 0;
2387 }
2388 
2389 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2390 			    void *data, u16 len)
2391 {
2392 	struct mgmt_pending_cmd *cmd;
2393 	int err;
2394 
2395 	if (!lmp_le_capable(hdev) ||
2396 	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2397 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2398 				       MGMT_STATUS_NOT_SUPPORTED);
2399 
2400 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2401 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2402 				       MGMT_STATUS_REJECTED);
2403 
2404 	hci_dev_lock(hdev);
2405 	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2406 	if (!cmd)
2407 		err = -ENOMEM;
2408 	else
2409 		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2410 
2411 	if (err < 0) {
2412 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2413 				      MGMT_STATUS_FAILED);
2414 
2415 		if (cmd)
2416 			mgmt_pending_free(cmd);
2417 	}
2418 
2419 	hci_dev_unlock(hdev);
2420 	return err;
2421 }
2422 
2423 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2424 {
2425 	struct mgmt_mesh_tx *mesh_tx;
2426 	struct mgmt_cp_mesh_send *send = data;
2427 	struct mgmt_rp_mesh_read_features rp;
2428 	bool sending;
2429 	int err = 0;
2430 
2431 	if (!lmp_le_capable(hdev) ||
2432 	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2433 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2434 				       MGMT_STATUS_NOT_SUPPORTED);
2435 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2436 	    len <= MGMT_MESH_SEND_SIZE ||
2437 	    len > (MGMT_MESH_SEND_SIZE + 31))
2438 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2439 				       MGMT_STATUS_REJECTED);
2440 
2441 	hci_dev_lock(hdev);
2442 
2443 	memset(&rp, 0, sizeof(rp));
2444 	rp.max_handles = MESH_HANDLES_MAX;
2445 
2446 	mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2447 
2448 	if (rp.max_handles <= rp.used_handles) {
2449 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2450 				      MGMT_STATUS_BUSY);
2451 		goto done;
2452 	}
2453 
2454 	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2455 	mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2456 
2457 	if (!mesh_tx)
2458 		err = -ENOMEM;
2459 	else if (!sending)
2460 		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2461 					 mesh_send_start_complete);
2462 
2463 	if (err < 0) {
2464 		bt_dev_err(hdev, "Send Mesh Failed %d", err);
2465 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2466 				      MGMT_STATUS_FAILED);
2467 
2468 		if (mesh_tx) {
2469 			if (sending)
2470 				mgmt_mesh_remove(mesh_tx);
2471 		}
2472 	} else {
2473 		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2474 
2475 		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2476 				  &mesh_tx->handle, 1);
2477 	}
2478 
2479 done:
2480 	hci_dev_unlock(hdev);
2481 	return err;
2482 }
2483 
/* MGMT_OP_SET_LE handler: enable or disable LE support. When the
 * controller is powered and the host LE state actually changes, the
 * write is queued as set_le_sync and answered from set_le_complete;
 * otherwise only the HCI_LE_ENABLED flag is flipped and the response is
 * sent directly.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Powered off, or the host state already matches: just update the
	 * flags and reply, no HCI traffic needed.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Disabling LE implicitly turns off LE advertising */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Serialize against other commands that touch the LE state */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2572 
2573 /* This is a helper function to test for pending mgmt commands that can
2574  * cause CoD or EIR HCI commands. We can only allow one such pending
2575  * mgmt command at a time since otherwise we cannot easily track what
2576  * the current values are, will be, and based on that calculate if a new
2577  * HCI command needs to be sent and if yes with what value.
2578  */
2579 static bool pending_eir_or_class(struct hci_dev *hdev)
2580 {
2581 	struct mgmt_pending_cmd *cmd;
2582 
2583 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2584 		switch (cmd->opcode) {
2585 		case MGMT_OP_ADD_UUID:
2586 		case MGMT_OP_REMOVE_UUID:
2587 		case MGMT_OP_SET_DEV_CLASS:
2588 		case MGMT_OP_SET_POWERED:
2589 			return true;
2590 		}
2591 	}
2592 
2593 	return false;
2594 }
2595 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order. 16- and 32-bit UUIDs are aliases of this
 * base where only bytes 12-15 differ (see get_uuid_size()).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2600 
2601 static u8 get_uuid_size(const u8 *uuid)
2602 {
2603 	u32 val;
2604 
2605 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2606 		return 128;
2607 
2608 	val = get_unaligned_le32(&uuid[12]);
2609 	if (val > 0xffff)
2610 		return 32;
2611 
2612 	return 16;
2613 }
2614 
2615 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2616 {
2617 	struct mgmt_pending_cmd *cmd = data;
2618 
2619 	bt_dev_dbg(hdev, "err %d", err);
2620 
2621 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2622 			  mgmt_status(err), hdev->dev_class, 3);
2623 
2624 	mgmt_pending_free(cmd);
2625 }
2626 
/* hci_cmd_sync work for MGMT_OP_ADD_UUID: push the new Class of Device
 * first, then rewrite the EIR that embeds the UUID list.
 */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2637 
/* MGMT_OP_ADD_UUID handler: record the UUID on hdev->uuids and queue a
 * class/EIR refresh. The response carrying the device class is sent
 * from mgmt_class_complete.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one EIR/class affecting command may be in flight */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	/* NOTE(review): the UUID stays on the list even if queueing the
	 * sync work below fails - confirm this is intentional.
	 */
	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2683 
2684 static bool enable_service_cache(struct hci_dev *hdev)
2685 {
2686 	if (!hdev_is_powered(hdev))
2687 		return false;
2688 
2689 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2690 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2691 				   CACHE_TIMEOUT);
2692 		return true;
2693 	}
2694 
2695 	return false;
2696 }
2697 
/* hci_cmd_sync work for MGMT_OP_REMOVE_UUID: refresh the Class of
 * Device, then the EIR that embeds the (now shorter) UUID list.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err;

	err = hci_update_class_sync(hdev);

	return err ?: hci_update_eir_sync(hdev);
}
2708 
/* MGMT_OP_REMOVE_UUID handler: remove one UUID, or all of them when the
 * all-zero wildcard UUID is given, then queue a class/EIR refresh. The
 * response carrying the device class comes from mgmt_class_complete.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID acts as a wildcard clearing the whole list */
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one EIR/class affecting command may be in flight */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache took over the deferred update,
		 * reply right away with the current class.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* Remove every list entry matching the given UUID */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2776 
/* hci_cmd_sync work for MGMT_OP_SET_DEV_CLASS: flush a pending service
 * cache (which forces an EIR rewrite) and push the new Class of Device.
 */
static int set_class_sync(struct hci_dev *hdev, void *data)
{
	int err = 0;

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* Make sure the cache timer cannot fire after the flag
		 * has been cleared.
		 */
		cancel_delayed_work_sync(&hdev->service_cache);
		err = hci_update_eir_sync(hdev);
	}

	if (err)
		return err;

	return hci_update_class_sync(hdev);
}
2791 
/* MGMT_OP_SET_DEV_CLASS handler: update major/minor device class and,
 * when powered, queue set_class_sync to push it to the controller. The
 * response carrying the device class comes from mgmt_class_complete.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one EIR/class affecting command may be in flight */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The low two minor bits and high three major bits are reserved
	 * in the Class of Device format and must be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Powered off: the new class takes effect on power on */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2843 
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the stored BR/EDR link keys
 * with the supplied list. All parameters are validated before the
 * existing keys are cleared, so a bad request leaves the store intact.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound on key_count such that the total request size
	 * still fits in the u16 length field.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the declared key count */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate every entry before touching the key store */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Skip keys on the administratively blocked list */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2932 
2933 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2934 			   u8 addr_type, struct sock *skip_sk)
2935 {
2936 	struct mgmt_ev_device_unpaired ev;
2937 
2938 	bacpy(&ev.addr.bdaddr, bdaddr);
2939 	ev.addr.type = addr_type;
2940 
2941 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2942 			  skip_sk);
2943 }
2944 
2945 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2946 {
2947 	struct mgmt_pending_cmd *cmd = data;
2948 	struct mgmt_cp_unpair_device *cp = cmd->param;
2949 
2950 	if (!err)
2951 		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2952 
2953 	cmd->cmd_complete(cmd, err);
2954 	mgmt_pending_free(cmd);
2955 }
2956 
2957 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2958 {
2959 	struct mgmt_pending_cmd *cmd = data;
2960 	struct mgmt_cp_unpair_device *cp = cmd->param;
2961 	struct hci_conn *conn;
2962 
2963 	if (cp->addr.type == BDADDR_BREDR)
2964 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2965 					       &cp->addr.bdaddr);
2966 	else
2967 		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2968 					       le_addr_type(cp->addr.type));
2969 
2970 	if (!conn)
2971 		return 0;
2972 
2973 	return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
2974 }
2975 
/* MGMT_OP_UNPAIR_DEVICE handler: delete the pairing material (link key
 * for BR/EDR, LTK/IRK via SMP for LE) and optionally disconnect the
 * device. If a link must be terminated the reply is deferred to
 * unpair_device_complete; otherwise it is sent here.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			/* No stored key means the device was not paired */
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* Not connected: drop the connection parameters now */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
				 unpair_device_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3104 
/* MGMT_OP_DISCONNECT handler: request termination of an existing
 * BR/EDR or LE connection. The reply is deferred until the disconnect
 * completes (via the pending command's cmd_complete).
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be in flight at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	/* Pick the transport matching the address type */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3170 
3171 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3172 {
3173 	switch (link_type) {
3174 	case LE_LINK:
3175 		switch (addr_type) {
3176 		case ADDR_LE_DEV_PUBLIC:
3177 			return BDADDR_LE_PUBLIC;
3178 
3179 		default:
3180 			/* Fallback to LE Random address type */
3181 			return BDADDR_LE_RANDOM;
3182 		}
3183 
3184 	default:
3185 		/* Fallback to BR/EDR type */
3186 		return BDADDR_BREDR;
3187 	}
3188 }
3189 
/* MGMT_OP_GET_CONNECTIONS handler: report the addresses of all
 * mgmt-visible connections, excluding SCO/eSCO links.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count the connections to size the response */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in the addresses. SCO/eSCO entries are
	 * written but then skipped without advancing i, so their slot is
	 * reused (or trimmed by the recalculated length below).
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3243 
3244 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3245 				   struct mgmt_cp_pin_code_neg_reply *cp)
3246 {
3247 	struct mgmt_pending_cmd *cmd;
3248 	int err;
3249 
3250 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3251 			       sizeof(*cp));
3252 	if (!cmd)
3253 		return -ENOMEM;
3254 
3255 	cmd->cmd_complete = addr_cmd_complete;
3256 
3257 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3258 			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3259 	if (err < 0)
3260 		mgmt_pending_remove(cmd);
3261 
3262 	return err;
3263 }
3264 
/* Handle the MGMT PIN Code Reply command: forward a user supplied PIN
 * code for an ongoing BR/EDR pairing to the controller.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* PIN code pairing only applies to BR/EDR (ACL) links */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16 byte PIN; anything shorter is
	 * rejected towards the remote with a negative reply and towards
	 * userspace with Invalid Params.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	/* The pending entry is completed from the HCI event path; only
	 * remove it here if the command could not be sent at all.
	 */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3326 
3327 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3328 			     u16 len)
3329 {
3330 	struct mgmt_cp_set_io_capability *cp = data;
3331 
3332 	bt_dev_dbg(hdev, "sock %p", sk);
3333 
3334 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3335 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3336 				       MGMT_STATUS_INVALID_PARAMS);
3337 
3338 	hci_dev_lock(hdev);
3339 
3340 	hdev->io_capability = cp->io_capability;
3341 
3342 	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3343 
3344 	hci_dev_unlock(hdev);
3345 
3346 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3347 				 NULL, 0);
3348 }
3349 
3350 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3351 {
3352 	struct hci_dev *hdev = conn->hdev;
3353 	struct mgmt_pending_cmd *cmd;
3354 
3355 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3356 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3357 			continue;
3358 
3359 		if (cmd->user_data != conn)
3360 			continue;
3361 
3362 		return cmd;
3363 	}
3364 
3365 	return NULL;
3366 }
3367 
/* Complete a pending Pair Device command: report the result to
 * userspace, detach the pairing callbacks and release the connection
 * references taken when the command was queued.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Drop the reference taken via hci_conn_get() when the command
	 * was set up; conn must not be touched after this.
	 */
	hci_conn_put(conn);

	return err;
}
3396 
3397 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3398 {
3399 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3400 	struct mgmt_pending_cmd *cmd;
3401 
3402 	cmd = find_pairing(conn);
3403 	if (cmd) {
3404 		cmd->cmd_complete(cmd, status);
3405 		mgmt_pending_remove(cmd);
3406 	}
3407 }
3408 
3409 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3410 {
3411 	struct mgmt_pending_cmd *cmd;
3412 
3413 	BT_DBG("status %u", status);
3414 
3415 	cmd = find_pairing(conn);
3416 	if (!cmd) {
3417 		BT_DBG("Unable to find a pending command");
3418 		return;
3419 	}
3420 
3421 	cmd->cmd_complete(cmd, mgmt_status(status));
3422 	mgmt_pending_remove(cmd);
3423 }
3424 
3425 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3426 {
3427 	struct mgmt_pending_cmd *cmd;
3428 
3429 	BT_DBG("status %u", status);
3430 
3431 	if (!status)
3432 		return;
3433 
3434 	cmd = find_pairing(conn);
3435 	if (!cmd) {
3436 		BT_DBG("Unable to find a pending command");
3437 		return;
3438 	}
3439 
3440 	cmd->cmd_complete(cmd, mgmt_status(status));
3441 	mgmt_pending_remove(cmd);
3442 }
3443 
3444 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3445 		       u16 len)
3446 {
3447 	struct mgmt_cp_pair_device *cp = data;
3448 	struct mgmt_rp_pair_device rp;
3449 	struct mgmt_pending_cmd *cmd;
3450 	u8 sec_level, auth_type;
3451 	struct hci_conn *conn;
3452 	int err;
3453 
3454 	bt_dev_dbg(hdev, "sock %p", sk);
3455 
3456 	memset(&rp, 0, sizeof(rp));
3457 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3458 	rp.addr.type = cp->addr.type;
3459 
3460 	if (!bdaddr_type_is_valid(cp->addr.type))
3461 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3462 					 MGMT_STATUS_INVALID_PARAMS,
3463 					 &rp, sizeof(rp));
3464 
3465 	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3466 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3467 					 MGMT_STATUS_INVALID_PARAMS,
3468 					 &rp, sizeof(rp));
3469 
3470 	hci_dev_lock(hdev);
3471 
3472 	if (!hdev_is_powered(hdev)) {
3473 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3474 					MGMT_STATUS_NOT_POWERED, &rp,
3475 					sizeof(rp));
3476 		goto unlock;
3477 	}
3478 
3479 	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3480 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3481 					MGMT_STATUS_ALREADY_PAIRED, &rp,
3482 					sizeof(rp));
3483 		goto unlock;
3484 	}
3485 
3486 	sec_level = BT_SECURITY_MEDIUM;
3487 	auth_type = HCI_AT_DEDICATED_BONDING;
3488 
3489 	if (cp->addr.type == BDADDR_BREDR) {
3490 		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3491 				       auth_type, CONN_REASON_PAIR_DEVICE);
3492 	} else {
3493 		u8 addr_type = le_addr_type(cp->addr.type);
3494 		struct hci_conn_params *p;
3495 
3496 		/* When pairing a new device, it is expected to remember
3497 		 * this device for future connections. Adding the connection
3498 		 * parameter information ahead of time allows tracking
3499 		 * of the peripheral preferred values and will speed up any
3500 		 * further connection establishment.
3501 		 *
3502 		 * If connection parameters already exist, then they
3503 		 * will be kept and this function does nothing.
3504 		 */
3505 		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3506 
3507 		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3508 			p->auto_connect = HCI_AUTO_CONN_DISABLED;
3509 
3510 		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3511 					   sec_level, HCI_LE_CONN_TIMEOUT,
3512 					   CONN_REASON_PAIR_DEVICE);
3513 	}
3514 
3515 	if (IS_ERR(conn)) {
3516 		int status;
3517 
3518 		if (PTR_ERR(conn) == -EBUSY)
3519 			status = MGMT_STATUS_BUSY;
3520 		else if (PTR_ERR(conn) == -EOPNOTSUPP)
3521 			status = MGMT_STATUS_NOT_SUPPORTED;
3522 		else if (PTR_ERR(conn) == -ECONNREFUSED)
3523 			status = MGMT_STATUS_REJECTED;
3524 		else
3525 			status = MGMT_STATUS_CONNECT_FAILED;
3526 
3527 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3528 					status, &rp, sizeof(rp));
3529 		goto unlock;
3530 	}
3531 
3532 	if (conn->connect_cfm_cb) {
3533 		hci_conn_drop(conn);
3534 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3535 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
3536 		goto unlock;
3537 	}
3538 
3539 	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3540 	if (!cmd) {
3541 		err = -ENOMEM;
3542 		hci_conn_drop(conn);
3543 		goto unlock;
3544 	}
3545 
3546 	cmd->cmd_complete = pairing_complete;
3547 
3548 	/* For LE, just connecting isn't a proof that the pairing finished */
3549 	if (cp->addr.type == BDADDR_BREDR) {
3550 		conn->connect_cfm_cb = pairing_complete_cb;
3551 		conn->security_cfm_cb = pairing_complete_cb;
3552 		conn->disconn_cfm_cb = pairing_complete_cb;
3553 	} else {
3554 		conn->connect_cfm_cb = le_pairing_complete_cb;
3555 		conn->security_cfm_cb = le_pairing_complete_cb;
3556 		conn->disconn_cfm_cb = le_pairing_complete_cb;
3557 	}
3558 
3559 	conn->io_capability = cp->io_cap;
3560 	cmd->user_data = hci_conn_get(conn);
3561 
3562 	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3563 	    hci_conn_security(conn, sec_level, auth_type, true)) {
3564 		cmd->cmd_complete(cmd, 0);
3565 		mgmt_pending_remove(cmd);
3566 	}
3567 
3568 	err = 0;
3569 
3570 unlock:
3571 	hci_dev_unlock(hdev);
3572 	return err;
3573 }
3574 
/* hci_cmd_sync callback that aborts the connection whose handle was
 * smuggled in via ERR_PTR() by cancel_pair_device().  The handle is
 * looked up again here because the connection may already be gone by
 * the time this runs.
 */
static int abort_conn_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn *conn;
	u16 handle = PTR_ERR(data);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn)
		return 0;

	return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
}
3586 
3587 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3588 			      u16 len)
3589 {
3590 	struct mgmt_addr_info *addr = data;
3591 	struct mgmt_pending_cmd *cmd;
3592 	struct hci_conn *conn;
3593 	int err;
3594 
3595 	bt_dev_dbg(hdev, "sock %p", sk);
3596 
3597 	hci_dev_lock(hdev);
3598 
3599 	if (!hdev_is_powered(hdev)) {
3600 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3601 				      MGMT_STATUS_NOT_POWERED);
3602 		goto unlock;
3603 	}
3604 
3605 	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3606 	if (!cmd) {
3607 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3608 				      MGMT_STATUS_INVALID_PARAMS);
3609 		goto unlock;
3610 	}
3611 
3612 	conn = cmd->user_data;
3613 
3614 	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3615 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3616 				      MGMT_STATUS_INVALID_PARAMS);
3617 		goto unlock;
3618 	}
3619 
3620 	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3621 	mgmt_pending_remove(cmd);
3622 
3623 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3624 				addr, sizeof(*addr));
3625 
3626 	/* Since user doesn't want to proceed with the connection, abort any
3627 	 * ongoing pairing and then terminate the link if it was created
3628 	 * because of the pair device action.
3629 	 */
3630 	if (addr->type == BDADDR_BREDR)
3631 		hci_remove_link_key(hdev, &addr->bdaddr);
3632 	else
3633 		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3634 					      le_addr_type(addr->type));
3635 
3636 	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3637 		hci_cmd_sync_queue(hdev, abort_conn_sync, ERR_PTR(conn->handle),
3638 				   NULL);
3639 
3640 unlock:
3641 	hci_dev_unlock(hdev);
3642 	return err;
3643 }
3644 
/* Common handler for the user pairing response commands (PIN code,
 * user confirm and passkey replies, both positive and negative).  For
 * LE connections the reply is handed to SMP, for BR/EDR the matching
 * HCI command is sent to the controller.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE replies are processed by SMP and answered synchronously */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3715 
/* Handle the MGMT PIN Code Negative Reply command via the common user
 * pairing response path (no passkey involved).
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3727 
/* Handle the MGMT User Confirm Reply command via the common user
 * pairing response path.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The command carries no variable-length fields, so any other
	 * length is malformed.
	 */
	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3743 
/* Handle the MGMT User Confirm Negative Reply command via the common
 * user pairing response path.
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3755 
/* Handle the MGMT User Passkey Reply command via the common user
 * pairing response path, forwarding the supplied passkey.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3767 
/* Handle the MGMT User Passkey Negative Reply command via the common
 * user pairing response path.
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3779 
3780 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3781 {
3782 	struct adv_info *adv_instance;
3783 
3784 	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3785 	if (!adv_instance)
3786 		return 0;
3787 
3788 	/* stop if current instance doesn't need to be changed */
3789 	if (!(adv_instance->flags & flags))
3790 		return 0;
3791 
3792 	cancel_adv_timeout(hdev);
3793 
3794 	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3795 	if (!adv_instance)
3796 		return 0;
3797 
3798 	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3799 
3800 	return 0;
3801 }
3802 
/* hci_cmd_sync callback: expire advertising instances that carry the
 * local name after the name has changed.
 */
static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}
3807 
/* hci_cmd_sync completion handler for the MGMT Set Local Name command */
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Bail out if the command is no longer on the pending list */
	if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		/* Expire advertising instances that carry the local name
		 * (see name_changed_sync()).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}
3832 
/* hci_cmd_sync work that pushes the new local name to the controller */
static int set_name_sync(struct hci_dev *hdev, void *data)
{
	/* For BR/EDR the name is written to the controller and also
	 * reflected in the EIR data.
	 */
	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}
3848 
/* Handle the MGMT Set Local Name command */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* While powered off only the stored copy is updated and the
	 * name-changed event is emitted right away.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	/* dev_name is only committed once the update work is queued */
	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3911 
/* hci_cmd_sync callback: expire advertising instances that carry the
 * appearance value after it has changed.
 */
static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}
3916 
3917 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3918 			  u16 len)
3919 {
3920 	struct mgmt_cp_set_appearance *cp = data;
3921 	u16 appearance;
3922 	int err;
3923 
3924 	bt_dev_dbg(hdev, "sock %p", sk);
3925 
3926 	if (!lmp_le_capable(hdev))
3927 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3928 				       MGMT_STATUS_NOT_SUPPORTED);
3929 
3930 	appearance = le16_to_cpu(cp->appearance);
3931 
3932 	hci_dev_lock(hdev);
3933 
3934 	if (hdev->appearance != appearance) {
3935 		hdev->appearance = appearance;
3936 
3937 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3938 			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3939 					   NULL);
3940 
3941 		ext_info_changed(hdev, sk);
3942 	}
3943 
3944 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3945 				0);
3946 
3947 	hci_dev_unlock(hdev);
3948 
3949 	return err;
3950 }
3951 
3952 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3953 				 void *data, u16 len)
3954 {
3955 	struct mgmt_rp_get_phy_configuration rp;
3956 
3957 	bt_dev_dbg(hdev, "sock %p", sk);
3958 
3959 	hci_dev_lock(hdev);
3960 
3961 	memset(&rp, 0, sizeof(rp));
3962 
3963 	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3964 	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3965 	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3966 
3967 	hci_dev_unlock(hdev);
3968 
3969 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3970 				 &rp, sizeof(rp));
3971 }
3972 
3973 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3974 {
3975 	struct mgmt_ev_phy_configuration_changed ev;
3976 
3977 	memset(&ev, 0, sizeof(ev));
3978 
3979 	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3980 
3981 	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3982 			  sizeof(ev), skip);
3983 }
3984 
/* hci_cmd_sync completion handler for the MGMT Set PHY Configuration
 * command (LE Set Default PHY).
 */
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Bail out if the command is no longer on the pending list */
	if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	/* When the sync request itself succeeded, derive the status from
	 * the command response skb (first byte is the HCI status).
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}
4021 
4022 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4023 {
4024 	struct mgmt_pending_cmd *cmd = data;
4025 	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4026 	struct hci_cp_le_set_default_phy cp_phy;
4027 	u32 selected_phys = __le32_to_cpu(cp->selected_phys);
4028 
4029 	memset(&cp_phy, 0, sizeof(cp_phy));
4030 
4031 	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4032 		cp_phy.all_phys |= 0x01;
4033 
4034 	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4035 		cp_phy.all_phys |= 0x02;
4036 
4037 	if (selected_phys & MGMT_PHY_LE_1M_TX)
4038 		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4039 
4040 	if (selected_phys & MGMT_PHY_LE_2M_TX)
4041 		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4042 
4043 	if (selected_phys & MGMT_PHY_LE_CODED_TX)
4044 		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4045 
4046 	if (selected_phys & MGMT_PHY_LE_1M_RX)
4047 		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4048 
4049 	if (selected_phys & MGMT_PHY_LE_2M_RX)
4050 		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4051 
4052 	if (selected_phys & MGMT_PHY_LE_CODED_RX)
4053 		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4054 
4055 	cmd->skb =  __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4056 				   sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4057 
4058 	return 0;
4059 }
4060 
/* Handle the MGMT Set PHY Configuration command: update the BR/EDR ACL
 * packet types directly and queue an HCI LE Set Default PHY update for
 * the LE part.
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Selecting a PHY the controller does not support is invalid */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* PHYs that cannot be configured must all remain selected */
	unconfigure_phys = supported_phys & ~configurable_phys;

	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Nothing to do if the selection matches the current state */
	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Map the BR/EDR PHY selection onto ACL packet type bits.  Note
	 * the EDR bits (HCI_2DH1/3/5 and HCI_3DH1/3/5) work inverted:
	 * setting them excludes the packet type, hence the |= on the
	 * "not selected" branches below.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If only the BR/EDR part changed there is no HCI command to
	 * issue; notify (if needed) and complete right away.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4189 
4190 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4191 			    u16 len)
4192 {
4193 	int err = MGMT_STATUS_SUCCESS;
4194 	struct mgmt_cp_set_blocked_keys *keys = data;
4195 	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4196 				   sizeof(struct mgmt_blocked_key_info));
4197 	u16 key_count, expected_len;
4198 	int i;
4199 
4200 	bt_dev_dbg(hdev, "sock %p", sk);
4201 
4202 	key_count = __le16_to_cpu(keys->key_count);
4203 	if (key_count > max_key_count) {
4204 		bt_dev_err(hdev, "too big key_count value %u", key_count);
4205 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4206 				       MGMT_STATUS_INVALID_PARAMS);
4207 	}
4208 
4209 	expected_len = struct_size(keys, keys, key_count);
4210 	if (expected_len != len) {
4211 		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4212 			   expected_len, len);
4213 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4214 				       MGMT_STATUS_INVALID_PARAMS);
4215 	}
4216 
4217 	hci_dev_lock(hdev);
4218 
4219 	hci_blocked_keys_clear(hdev);
4220 
4221 	for (i = 0; i < key_count; ++i) {
4222 		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4223 
4224 		if (!b) {
4225 			err = MGMT_STATUS_NO_RESOURCES;
4226 			break;
4227 		}
4228 
4229 		b->type = keys->keys[i].type;
4230 		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4231 		list_add_rcu(&b->list, &hdev->blocked_keys);
4232 	}
4233 	hci_dev_unlock(hdev);
4234 
4235 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4236 				err, NULL, 0);
4237 }
4238 
4239 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4240 			       void *data, u16 len)
4241 {
4242 	struct mgmt_mode *cp = data;
4243 	int err;
4244 	bool changed = false;
4245 
4246 	bt_dev_dbg(hdev, "sock %p", sk);
4247 
4248 	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
4249 		return mgmt_cmd_status(sk, hdev->id,
4250 				       MGMT_OP_SET_WIDEBAND_SPEECH,
4251 				       MGMT_STATUS_NOT_SUPPORTED);
4252 
4253 	if (cp->val != 0x00 && cp->val != 0x01)
4254 		return mgmt_cmd_status(sk, hdev->id,
4255 				       MGMT_OP_SET_WIDEBAND_SPEECH,
4256 				       MGMT_STATUS_INVALID_PARAMS);
4257 
4258 	hci_dev_lock(hdev);
4259 
4260 	if (hdev_is_powered(hdev) &&
4261 	    !!cp->val != hci_dev_test_flag(hdev,
4262 					   HCI_WIDEBAND_SPEECH_ENABLED)) {
4263 		err = mgmt_cmd_status(sk, hdev->id,
4264 				      MGMT_OP_SET_WIDEBAND_SPEECH,
4265 				      MGMT_STATUS_REJECTED);
4266 		goto unlock;
4267 	}
4268 
4269 	if (cp->val)
4270 		changed = !hci_dev_test_and_set_flag(hdev,
4271 						   HCI_WIDEBAND_SPEECH_ENABLED);
4272 	else
4273 		changed = hci_dev_test_and_clear_flag(hdev,
4274 						   HCI_WIDEBAND_SPEECH_ENABLED);
4275 
4276 	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
4277 	if (err < 0)
4278 		goto unlock;
4279 
4280 	if (changed)
4281 		err = new_settings(hdev, sk);
4282 
4283 unlock:
4284 	hci_dev_unlock(hdev);
4285 	return err;
4286 }
4287 
/* Handle the MGMT Read Controller Capabilities command.  The reply is
 * assembled as a sequence of EIR-style (length, type, value) fields in
 * rp->cap; buf is sized for the worst case of all fields present
 * (assumes 2 header bytes per EIR field — confirm against eir.h).
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	/* Only the actually used portion of the capability buffer is sent */
	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
4354 
/* Experimental feature UUIDs. Each array stores the 16 UUID bytes in
 * reverse (little-endian) order of the canonical string form quoted
 * above it.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};

/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
static const u8 mgmt_mesh_uuid[16] = {
	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
};
4398 
4399 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4400 				  void *data, u16 data_len)
4401 {
4402 	struct mgmt_rp_read_exp_features_info *rp;
4403 	size_t len;
4404 	u16 idx = 0;
4405 	u32 flags;
4406 	int status;
4407 
4408 	bt_dev_dbg(hdev, "sock %p", sk);
4409 
4410 	/* Enough space for 7 features */
4411 	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4412 	rp = kzalloc(len, GFP_KERNEL);
4413 	if (!rp)
4414 		return -ENOMEM;
4415 
4416 #ifdef CONFIG_BT_FEATURE_DEBUG
4417 	if (!hdev) {
4418 		flags = bt_dbg_get() ? BIT(0) : 0;
4419 
4420 		memcpy(rp->features[idx].uuid, debug_uuid, 16);
4421 		rp->features[idx].flags = cpu_to_le32(flags);
4422 		idx++;
4423 	}
4424 #endif
4425 
4426 	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4427 		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4428 			flags = BIT(0);
4429 		else
4430 			flags = 0;
4431 
4432 		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4433 		rp->features[idx].flags = cpu_to_le32(flags);
4434 		idx++;
4435 	}
4436 
4437 	if (hdev && ll_privacy_capable(hdev)) {
4438 		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
4439 			flags = BIT(0) | BIT(1);
4440 		else
4441 			flags = BIT(1);
4442 
4443 		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4444 		rp->features[idx].flags = cpu_to_le32(flags);
4445 		idx++;
4446 	}
4447 
4448 	if (hdev && (aosp_has_quality_report(hdev) ||
4449 		     hdev->set_quality_report)) {
4450 		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4451 			flags = BIT(0);
4452 		else
4453 			flags = 0;
4454 
4455 		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4456 		rp->features[idx].flags = cpu_to_le32(flags);
4457 		idx++;
4458 	}
4459 
4460 	if (hdev && hdev->get_data_path_id) {
4461 		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4462 			flags = BIT(0);
4463 		else
4464 			flags = 0;
4465 
4466 		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4467 		rp->features[idx].flags = cpu_to_le32(flags);
4468 		idx++;
4469 	}
4470 
4471 	if (IS_ENABLED(CONFIG_BT_LE)) {
4472 		flags = iso_enabled() ? BIT(0) : 0;
4473 		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4474 		rp->features[idx].flags = cpu_to_le32(flags);
4475 		idx++;
4476 	}
4477 
4478 	if (hdev && lmp_le_capable(hdev)) {
4479 		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4480 			flags = BIT(0);
4481 		else
4482 			flags = 0;
4483 
4484 		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4485 		rp->features[idx].flags = cpu_to_le32(flags);
4486 		idx++;
4487 	}
4488 
4489 	rp->feature_count = cpu_to_le16(idx);
4490 
4491 	/* After reading the experimental features information, enable
4492 	 * the events to update client on any future change.
4493 	 */
4494 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4495 
4496 	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4497 				   MGMT_OP_READ_EXP_FEATURES_INFO,
4498 				   0, rp, sizeof(*rp) + (20 * idx));
4499 
4500 	kfree(rp);
4501 	return status;
4502 }
4503 
4504 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4505 					  struct sock *skip)
4506 {
4507 	struct mgmt_ev_exp_feature_changed ev;
4508 
4509 	memset(&ev, 0, sizeof(ev));
4510 	memcpy(ev.uuid, rpa_resolution_uuid, 16);
4511 	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4512 
4513 	// Do we need to be atomic with the conn_flags?
4514 	if (enabled && privacy_mode_capable(hdev))
4515 		hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4516 	else
4517 		hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
4518 
4519 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4520 				  &ev, sizeof(ev),
4521 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4522 
4523 }
4524 
4525 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4526 			       bool enabled, struct sock *skip)
4527 {
4528 	struct mgmt_ev_exp_feature_changed ev;
4529 
4530 	memset(&ev, 0, sizeof(ev));
4531 	memcpy(ev.uuid, uuid, 16);
4532 	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4533 
4534 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4535 				  &ev, sizeof(ev),
4536 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4537 }
4538 
/* Build one entry of the exp_features[] dispatch table: maps a feature
 * UUID to the handler that implements setting it.
 */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
4544 
4545 /* The zero key uuid is special. Multiple exp features are set through it. */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	/* The reply always carries the zero UUID with no flags set */
	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* On the non-controller index the zero key disables debugging */
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		/* Notify listeners only when the setting actually changed */
		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	/* On a controller index, clear LL privacy; only done while the
	 * controller is powered off.
	 */
	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed;

		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		if (changed)
			exp_feature_changed(hdev, rpa_resolution_uuid, false,
					    sk);
	}

	/* Opt this socket in to future experimental feature events */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
4581 
4582 #ifdef CONFIG_BT_FEATURE_DEBUG
4583 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4584 			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4585 {
4586 	struct mgmt_rp_set_exp_feature rp;
4587 
4588 	bool val, changed;
4589 	int err;
4590 
4591 	/* Command requires to use the non-controller index */
4592 	if (hdev)
4593 		return mgmt_cmd_status(sk, hdev->id,
4594 				       MGMT_OP_SET_EXP_FEATURE,
4595 				       MGMT_STATUS_INVALID_INDEX);
4596 
4597 	/* Parameters are limited to a single octet */
4598 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4599 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4600 				       MGMT_OP_SET_EXP_FEATURE,
4601 				       MGMT_STATUS_INVALID_PARAMS);
4602 
4603 	/* Only boolean on/off is supported */
4604 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4605 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4606 				       MGMT_OP_SET_EXP_FEATURE,
4607 				       MGMT_STATUS_INVALID_PARAMS);
4608 
4609 	val = !!cp->param[0];
4610 	changed = val ? !bt_dbg_get() : bt_dbg_get();
4611 	bt_dbg_set(val);
4612 
4613 	memcpy(rp.uuid, debug_uuid, 16);
4614 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4615 
4616 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4617 
4618 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4619 				MGMT_OP_SET_EXP_FEATURE, 0,
4620 				&rp, sizeof(rp));
4621 
4622 	if (changed)
4623 		exp_feature_changed(hdev, debug_uuid, val, sk);
4624 
4625 	return err;
4626 }
4627 #endif
4628 
4629 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4630 			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4631 {
4632 	struct mgmt_rp_set_exp_feature rp;
4633 	bool val, changed;
4634 	int err;
4635 
4636 	/* Command requires to use the controller index */
4637 	if (!hdev)
4638 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4639 				       MGMT_OP_SET_EXP_FEATURE,
4640 				       MGMT_STATUS_INVALID_INDEX);
4641 
4642 	/* Changes can only be made when controller is powered down */
4643 	if (hdev_is_powered(hdev))
4644 		return mgmt_cmd_status(sk, hdev->id,
4645 				       MGMT_OP_SET_EXP_FEATURE,
4646 				       MGMT_STATUS_REJECTED);
4647 
4648 	/* Parameters are limited to a single octet */
4649 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4650 		return mgmt_cmd_status(sk, hdev->id,
4651 				       MGMT_OP_SET_EXP_FEATURE,
4652 				       MGMT_STATUS_INVALID_PARAMS);
4653 
4654 	/* Only boolean on/off is supported */
4655 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4656 		return mgmt_cmd_status(sk, hdev->id,
4657 				       MGMT_OP_SET_EXP_FEATURE,
4658 				       MGMT_STATUS_INVALID_PARAMS);
4659 
4660 	val = !!cp->param[0];
4661 
4662 	if (val) {
4663 		changed = !hci_dev_test_and_set_flag(hdev,
4664 						     HCI_MESH_EXPERIMENTAL);
4665 	} else {
4666 		hci_dev_clear_flag(hdev, HCI_MESH);
4667 		changed = hci_dev_test_and_clear_flag(hdev,
4668 						      HCI_MESH_EXPERIMENTAL);
4669 	}
4670 
4671 	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4672 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4673 
4674 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4675 
4676 	err = mgmt_cmd_complete(sk, hdev->id,
4677 				MGMT_OP_SET_EXP_FEATURE, 0,
4678 				&rp, sizeof(rp));
4679 
4680 	if (changed)
4681 		exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4682 
4683 	return err;
4684 }
4685 
/* Handler for the LL privacy (RPA resolution) experimental feature:
 * toggles HCI_ENABLE_LL_PRIVACY, allowed only while powered off.
 */
static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
		/* Enabling LL privacy clears the advertising setting */
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* Also updates hdev->conn_flags and notifies other sockets */
	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}
4750 
/* Handler for the quality report experimental feature: forwards the
 * toggle to the driver's set_quality_report hook if present, otherwise
 * to the AOSP vendor extension.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Serialize with other HCI request processing; the driver/AOSP
	 * call below presumably issues HCI traffic - confirm.
	 */
	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		/* Prefer the driver hook, fall back to AOSP extension */
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		/* Only update the flag once the toggle was accepted */
		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4824 
4825 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4826 				  struct mgmt_cp_set_exp_feature *cp,
4827 				  u16 data_len)
4828 {
4829 	bool val, changed;
4830 	int err;
4831 	struct mgmt_rp_set_exp_feature rp;
4832 
4833 	/* Command requires to use a valid controller index */
4834 	if (!hdev)
4835 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4836 				       MGMT_OP_SET_EXP_FEATURE,
4837 				       MGMT_STATUS_INVALID_INDEX);
4838 
4839 	/* Parameters are limited to a single octet */
4840 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4841 		return mgmt_cmd_status(sk, hdev->id,
4842 				       MGMT_OP_SET_EXP_FEATURE,
4843 				       MGMT_STATUS_INVALID_PARAMS);
4844 
4845 	/* Only boolean on/off is supported */
4846 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4847 		return mgmt_cmd_status(sk, hdev->id,
4848 				       MGMT_OP_SET_EXP_FEATURE,
4849 				       MGMT_STATUS_INVALID_PARAMS);
4850 
4851 	val = !!cp->param[0];
4852 	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4853 
4854 	if (!hdev->get_data_path_id) {
4855 		return mgmt_cmd_status(sk, hdev->id,
4856 				       MGMT_OP_SET_EXP_FEATURE,
4857 				       MGMT_STATUS_NOT_SUPPORTED);
4858 	}
4859 
4860 	if (changed) {
4861 		if (val)
4862 			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4863 		else
4864 			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4865 	}
4866 
4867 	bt_dev_info(hdev, "offload codecs enable %d changed %d",
4868 		    val, changed);
4869 
4870 	memcpy(rp.uuid, offload_codecs_uuid, 16);
4871 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4872 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4873 	err = mgmt_cmd_complete(sk, hdev->id,
4874 				MGMT_OP_SET_EXP_FEATURE, 0,
4875 				&rp, sizeof(rp));
4876 
4877 	if (changed)
4878 		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4879 
4880 	return err;
4881 }
4882 
4883 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4884 					  struct mgmt_cp_set_exp_feature *cp,
4885 					  u16 data_len)
4886 {
4887 	bool val, changed;
4888 	int err;
4889 	struct mgmt_rp_set_exp_feature rp;
4890 
4891 	/* Command requires to use a valid controller index */
4892 	if (!hdev)
4893 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4894 				       MGMT_OP_SET_EXP_FEATURE,
4895 				       MGMT_STATUS_INVALID_INDEX);
4896 
4897 	/* Parameters are limited to a single octet */
4898 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4899 		return mgmt_cmd_status(sk, hdev->id,
4900 				       MGMT_OP_SET_EXP_FEATURE,
4901 				       MGMT_STATUS_INVALID_PARAMS);
4902 
4903 	/* Only boolean on/off is supported */
4904 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4905 		return mgmt_cmd_status(sk, hdev->id,
4906 				       MGMT_OP_SET_EXP_FEATURE,
4907 				       MGMT_STATUS_INVALID_PARAMS);
4908 
4909 	val = !!cp->param[0];
4910 	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4911 
4912 	if (!hci_dev_le_state_simultaneous(hdev)) {
4913 		return mgmt_cmd_status(sk, hdev->id,
4914 				       MGMT_OP_SET_EXP_FEATURE,
4915 				       MGMT_STATUS_NOT_SUPPORTED);
4916 	}
4917 
4918 	if (changed) {
4919 		if (val)
4920 			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4921 		else
4922 			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4923 	}
4924 
4925 	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4926 		    val, changed);
4927 
4928 	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4929 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4930 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4931 	err = mgmt_cmd_complete(sk, hdev->id,
4932 				MGMT_OP_SET_EXP_FEATURE, 0,
4933 				&rp, sizeof(rp));
4934 
4935 	if (changed)
4936 		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4937 
4938 	return err;
4939 }
4940 
4941 #ifdef CONFIG_BT_LE
4942 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4943 			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4944 {
4945 	struct mgmt_rp_set_exp_feature rp;
4946 	bool val, changed = false;
4947 	int err;
4948 
4949 	/* Command requires to use the non-controller index */
4950 	if (hdev)
4951 		return mgmt_cmd_status(sk, hdev->id,
4952 				       MGMT_OP_SET_EXP_FEATURE,
4953 				       MGMT_STATUS_INVALID_INDEX);
4954 
4955 	/* Parameters are limited to a single octet */
4956 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4957 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4958 				       MGMT_OP_SET_EXP_FEATURE,
4959 				       MGMT_STATUS_INVALID_PARAMS);
4960 
4961 	/* Only boolean on/off is supported */
4962 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4963 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4964 				       MGMT_OP_SET_EXP_FEATURE,
4965 				       MGMT_STATUS_INVALID_PARAMS);
4966 
4967 	val = cp->param[0] ? true : false;
4968 	if (val)
4969 		err = iso_init();
4970 	else
4971 		err = iso_exit();
4972 
4973 	if (!err)
4974 		changed = true;
4975 
4976 	memcpy(rp.uuid, iso_socket_uuid, 16);
4977 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4978 
4979 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4980 
4981 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4982 				MGMT_OP_SET_EXP_FEATURE, 0,
4983 				&rp, sizeof(rp));
4984 
4985 	if (changed)
4986 		exp_feature_changed(hdev, iso_socket_uuid, val, sk);
4987 
4988 	return err;
4989 }
4990 #endif
4991 
/* Dispatch table mapping each experimental feature UUID to the handler
 * implementing MGMT_OP_SET_EXP_FEATURE for it; terminated by a NULL
 * sentinel entry.
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
#ifdef CONFIG_BT_LE
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
#endif

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
5013 
5014 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
5015 			   void *data, u16 data_len)
5016 {
5017 	struct mgmt_cp_set_exp_feature *cp = data;
5018 	size_t i = 0;
5019 
5020 	bt_dev_dbg(hdev, "sock %p", sk);
5021 
5022 	for (i = 0; exp_features[i].uuid; i++) {
5023 		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
5024 			return exp_features[i].set_func(sk, hdev, cp, data_len);
5025 	}
5026 
5027 	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
5028 			       MGMT_OP_SET_EXP_FEATURE,
5029 			       MGMT_STATUS_NOT_SUPPORTED);
5030 }
5031 
5032 static u32 get_params_flags(struct hci_dev *hdev,
5033 			    struct hci_conn_params *params)
5034 {
5035 	u32 flags = hdev->conn_flags;
5036 
5037 	/* Devices using RPAs can only be programmed in the acceptlist if
5038 	 * LL Privacy has been enable otherwise they cannot mark
5039 	 * HCI_CONN_FLAG_REMOTE_WAKEUP.
5040 	 */
5041 	if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
5042 	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
5043 		flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
5044 
5045 	return flags;
5046 }
5047 
5048 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5049 			    u16 data_len)
5050 {
5051 	struct mgmt_cp_get_device_flags *cp = data;
5052 	struct mgmt_rp_get_device_flags rp;
5053 	struct bdaddr_list_with_flags *br_params;
5054 	struct hci_conn_params *params;
5055 	u32 supported_flags;
5056 	u32 current_flags = 0;
5057 	u8 status = MGMT_STATUS_INVALID_PARAMS;
5058 
5059 	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
5060 		   &cp->addr.bdaddr, cp->addr.type);
5061 
5062 	hci_dev_lock(hdev);
5063 
5064 	supported_flags = hdev->conn_flags;
5065 
5066 	memset(&rp, 0, sizeof(rp));
5067 
5068 	if (cp->addr.type == BDADDR_BREDR) {
5069 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5070 							      &cp->addr.bdaddr,
5071 							      cp->addr.type);
5072 		if (!br_params)
5073 			goto done;
5074 
5075 		current_flags = br_params->flags;
5076 	} else {
5077 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5078 						le_addr_type(cp->addr.type));
5079 		if (!params)
5080 			goto done;
5081 
5082 		supported_flags = get_params_flags(hdev, params);
5083 		current_flags = params->flags;
5084 	}
5085 
5086 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5087 	rp.addr.type = cp->addr.type;
5088 	rp.supported_flags = cpu_to_le32(supported_flags);
5089 	rp.current_flags = cpu_to_le32(current_flags);
5090 
5091 	status = MGMT_STATUS_SUCCESS;
5092 
5093 done:
5094 	hci_dev_unlock(hdev);
5095 
5096 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5097 				&rp, sizeof(rp));
5098 }
5099 
5100 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5101 				 bdaddr_t *bdaddr, u8 bdaddr_type,
5102 				 u32 supported_flags, u32 current_flags)
5103 {
5104 	struct mgmt_ev_device_flags_changed ev;
5105 
5106 	bacpy(&ev.addr.bdaddr, bdaddr);
5107 	ev.addr.type = bdaddr_type;
5108 	ev.supported_flags = cpu_to_le32(supported_flags);
5109 	ev.current_flags = cpu_to_le32(current_flags);
5110 
5111 	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5112 }
5113 
/* Handler for MGMT_OP_SET_DEVICE_FLAGS: updates per-device connection
 * flags for a BR/EDR accept-list entry or LE connection parameters and
 * emits Device Flags Changed on success.
 */
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags;
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	/* NOTE(review): conn_flags is read before hci_dev_lock() is
	 * taken and could change concurrently - consider locking earlier.
	 */
	supported_flags = hdev->conn_flags;

	/* Reject any requested flag outside the supported set */
	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto done;
	}

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}

		goto unlock;
	}

	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					le_addr_type(cp->addr.type));
	if (!params) {
		bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
			    &cp->addr.bdaddr, le_addr_type(cp->addr.type));
		goto unlock;
	}

	/* LE entries may restrict the supported flags further; re-check
	 * the requested flags against the per-params set.
	 */
	supported_flags = get_params_flags(hdev, params);

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto unlock;
	}

	params->flags = current_flags;
	status = MGMT_STATUS_SUCCESS;

	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
	 * has been set.
	 */
	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
		hci_update_passive_scan(hdev);

unlock:
	hci_dev_unlock(hdev);

done:
	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}
5190 
5191 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5192 				   u16 handle)
5193 {
5194 	struct mgmt_ev_adv_monitor_added ev;
5195 
5196 	ev.monitor_handle = cpu_to_le16(handle);
5197 
5198 	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5199 }
5200 
/* Emit an Adv Monitor Removed event. If the removal was triggered by a
 * pending MGMT_OP_REMOVE_ADV_MONITOR with a non-zero handle, skip the
 * requesting socket (presumably it gets a command reply instead -
 * confirm against the remove handler).
 */
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk_skip = NULL;
	struct mgmt_cp_remove_adv_monitor *cp;

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cmd) {
		cp = cmd->param;

		/* Handle 0 does not skip the requester */
		if (cp->monitor_handle)
			sk_skip = cmd->sk;
	}

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
}
5220 
5221 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5222 				 void *data, u16 len)
5223 {
5224 	struct adv_monitor *monitor = NULL;
5225 	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5226 	int handle, err;
5227 	size_t rp_size = 0;
5228 	__u32 supported = 0;
5229 	__u32 enabled = 0;
5230 	__u16 num_handles = 0;
5231 	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5232 
5233 	BT_DBG("request for %s", hdev->name);
5234 
5235 	hci_dev_lock(hdev);
5236 
5237 	if (msft_monitor_supported(hdev))
5238 		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5239 
5240 	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5241 		handles[num_handles++] = monitor->handle;
5242 
5243 	hci_dev_unlock(hdev);
5244 
5245 	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5246 	rp = kmalloc(rp_size, GFP_KERNEL);
5247 	if (!rp)
5248 		return -ENOMEM;
5249 
5250 	/* All supported features are currently enabled */
5251 	enabled = supported;
5252 
5253 	rp->supported_features = cpu_to_le32(supported);
5254 	rp->enabled_features = cpu_to_le32(enabled);
5255 	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5256 	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5257 	rp->num_handles = cpu_to_le16(num_handles);
5258 	if (num_handles)
5259 		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5260 
5261 	err = mgmt_cmd_complete(sk, hdev->id,
5262 				MGMT_OP_READ_ADV_MONITOR_FEATURES,
5263 				MGMT_STATUS_SUCCESS, rp, rp_size);
5264 
5265 	kfree(rp);
5266 
5267 	return err;
5268 }
5269 
/* Completion callback for the Add Adv Patterns Monitor commands:
 * replies to the pending mgmt command with the monitor handle and, on
 * success, announces the monitor and refreshes passive scanning.
 */
static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	hci_dev_lock(hdev);

	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		/* Registration succeeded: notify other sockets, account
		 * for the monitor and let passive scan pick it up.
		 */
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5297 
5298 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5299 {
5300 	struct mgmt_pending_cmd *cmd = data;
5301 	struct adv_monitor *monitor = cmd->user_data;
5302 
5303 	return hci_add_adv_monitor(hdev, monitor);
5304 }
5305 
5306 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5307 				      struct adv_monitor *m, u8 status,
5308 				      void *data, u16 len, u16 op)
5309 {
5310 	struct mgmt_pending_cmd *cmd;
5311 	int err;
5312 
5313 	hci_dev_lock(hdev);
5314 
5315 	if (status)
5316 		goto unlock;
5317 
5318 	if (pending_find(MGMT_OP_SET_LE, hdev) ||
5319 	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5320 	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
5321 	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
5322 		status = MGMT_STATUS_BUSY;
5323 		goto unlock;
5324 	}
5325 
5326 	cmd = mgmt_pending_add(sk, op, hdev, data, len);
5327 	if (!cmd) {
5328 		status = MGMT_STATUS_NO_RESOURCES;
5329 		goto unlock;
5330 	}
5331 
5332 	cmd->user_data = m;
5333 	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5334 				 mgmt_add_adv_patterns_monitor_complete);
5335 	if (err) {
5336 		if (err == -ENOMEM)
5337 			status = MGMT_STATUS_NO_RESOURCES;
5338 		else
5339 			status = MGMT_STATUS_FAILED;
5340 
5341 		goto unlock;
5342 	}
5343 
5344 	hci_dev_unlock(hdev);
5345 
5346 	return 0;
5347 
5348 unlock:
5349 	hci_free_adv_monitor(hdev, m);
5350 	hci_dev_unlock(hdev);
5351 	return mgmt_cmd_status(sk, hdev->id, op, status);
5352 }
5353 
5354 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5355 				   struct mgmt_adv_rssi_thresholds *rssi)
5356 {
5357 	if (rssi) {
5358 		m->rssi.low_threshold = rssi->low_threshold;
5359 		m->rssi.low_threshold_timeout =
5360 		    __le16_to_cpu(rssi->low_threshold_timeout);
5361 		m->rssi.high_threshold = rssi->high_threshold;
5362 		m->rssi.high_threshold_timeout =
5363 		    __le16_to_cpu(rssi->high_threshold_timeout);
5364 		m->rssi.sampling_period = rssi->sampling_period;
5365 	} else {
5366 		/* Default values. These numbers are the least constricting
5367 		 * parameters for MSFT API to work, so it behaves as if there
5368 		 * are no rssi parameter to consider. May need to be changed
5369 		 * if other API are to be supported.
5370 		 */
5371 		m->rssi.low_threshold = -127;
5372 		m->rssi.low_threshold_timeout = 60;
5373 		m->rssi.high_threshold = -127;
5374 		m->rssi.high_threshold_timeout = 0;
5375 		m->rssi.sampling_period = 0;
5376 	}
5377 }
5378 
5379 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5380 				    struct mgmt_adv_pattern *patterns)
5381 {
5382 	u8 offset = 0, length = 0;
5383 	struct adv_pattern *p = NULL;
5384 	int i;
5385 
5386 	for (i = 0; i < pattern_count; i++) {
5387 		offset = patterns[i].offset;
5388 		length = patterns[i].length;
5389 		if (offset >= HCI_MAX_AD_LENGTH ||
5390 		    length > HCI_MAX_AD_LENGTH ||
5391 		    (offset + length) > HCI_MAX_AD_LENGTH)
5392 			return MGMT_STATUS_INVALID_PARAMS;
5393 
5394 		p = kmalloc(sizeof(*p), GFP_KERNEL);
5395 		if (!p)
5396 			return MGMT_STATUS_NO_RESOURCES;
5397 
5398 		p->ad_type = patterns[i].ad_type;
5399 		p->offset = patterns[i].offset;
5400 		p->length = patterns[i].length;
5401 		memcpy(p->value, patterns[i].value, p->length);
5402 
5403 		INIT_LIST_HEAD(&p->list);
5404 		list_add(&p->list, &m->patterns);
5405 	}
5406 
5407 	return MGMT_STATUS_SUCCESS;
5408 }
5409 
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR handler: registers a pattern-only monitor
 * with default RSSI parameters.  Ownership of @m always passes to
 * __add_adv_patterns_monitor(), even on early validation errors (where it
 * may still be NULL).
 */
static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* At least one pattern must follow the fixed-size header */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	/* Request length must match the advertised pattern count exactly */
	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	/* NULL selects the default (least constricting) RSSI thresholds */
	parse_adv_monitor_rssi(m, NULL);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
}
5446 
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI handler: like
 * add_adv_patterns_monitor() but with caller-supplied RSSI thresholds.
 * Ownership of @m always passes to __add_adv_patterns_monitor().
 */
static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
					 void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* At least one pattern must follow the fixed-size header */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	/* Request length must match the advertised pattern count exactly */
	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, &cp->rssi);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
}
5483 
/* hci_cmd_sync completion for Remove Advertising Monitor: responds with the
 * requested handle and refreshes passive scanning on success (removing a
 * monitor may change whether background scanning is still required).
 */
static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
{
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;

	hci_dev_lock(hdev);

	/* Echo the handle from the request (kept little-endian) */
	rp.monitor_handle = cp->monitor_handle;

	if (!status)
		hci_update_passive_scan(hdev);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5506 
5507 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5508 {
5509 	struct mgmt_pending_cmd *cmd = data;
5510 	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5511 	u16 handle = __le16_to_cpu(cp->monitor_handle);
5512 
5513 	if (!handle)
5514 		return hci_remove_all_adv_monitor(hdev);
5515 
5516 	return hci_remove_single_adv_monitor(hdev, handle);
5517 }
5518 
/* MGMT_OP_REMOVE_ADV_MONITOR handler: queues removal of one monitor (or all,
 * for handle 0) on the cmd_sync machinery.  Rejected with BUSY while any
 * other monitor/LE state changing operation is still pending.
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err, status;

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
				 mgmt_remove_adv_monitor_complete);

	if (err) {
		/* Completion callback will never run; drop the pending cmd */
		mgmt_pending_remove(cmd);

		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
5564 
/* hci_cmd_sync completion for MGMT_OP_READ_LOCAL_OOB_DATA: translates the
 * HCI response held in cmd->skb into a mgmt response.  Controllers without
 * BR/EDR Secure Connections return only the P-192 hash/randomizer, so the
 * reply is shortened accordingly.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	if (!status) {
		/* Queueing succeeded: derive the status from the reply skb
		 * (missing, an ERR_PTR, or its leading HCI status byte).
		 */
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* No P-256 data: trim the trailing hash256/rand256 fields */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
5631 
5632 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5633 {
5634 	struct mgmt_pending_cmd *cmd = data;
5635 
5636 	if (bredr_sc_enabled(hdev))
5637 		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5638 	else
5639 		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5640 
5641 	if (IS_ERR(cmd->skb))
5642 		return PTR_ERR(cmd->skb);
5643 	else
5644 		return 0;
5645 }
5646 
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: requires a powered, SSP-capable
 * controller; queues the HCI read and replies asynchronously from
 * read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB pairing data is a Secure Simple Pairing feature */
	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5688 
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler.  Two request formats are accepted,
 * distinguished by @len: the legacy form carries only P-192 hash/randomizer
 * (BR/EDR only), the extended form carries both P-192 and P-256 values.
 * All-zero key material means "no data" for the respective curve.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy format: P-192 values only, BR/EDR addresses only */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended format: both P-192 and P-256 values */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5796 
5797 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5798 				  void *data, u16 len)
5799 {
5800 	struct mgmt_cp_remove_remote_oob_data *cp = data;
5801 	u8 status;
5802 	int err;
5803 
5804 	bt_dev_dbg(hdev, "sock %p", sk);
5805 
5806 	if (cp->addr.type != BDADDR_BREDR)
5807 		return mgmt_cmd_complete(sk, hdev->id,
5808 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5809 					 MGMT_STATUS_INVALID_PARAMS,
5810 					 &cp->addr, sizeof(cp->addr));
5811 
5812 	hci_dev_lock(hdev);
5813 
5814 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5815 		hci_remote_oob_data_clear(hdev);
5816 		status = MGMT_STATUS_SUCCESS;
5817 		goto done;
5818 	}
5819 
5820 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5821 	if (err < 0)
5822 		status = MGMT_STATUS_INVALID_PARAMS;
5823 	else
5824 		status = MGMT_STATUS_SUCCESS;
5825 
5826 done:
5827 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5828 				status, &cp->addr, sizeof(cp->addr));
5829 
5830 	hci_dev_unlock(hdev);
5831 	return err;
5832 }
5833 
5834 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5835 {
5836 	struct mgmt_pending_cmd *cmd;
5837 
5838 	bt_dev_dbg(hdev, "status %u", status);
5839 
5840 	hci_dev_lock(hdev);
5841 
5842 	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5843 	if (!cmd)
5844 		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5845 
5846 	if (!cmd)
5847 		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5848 
5849 	if (cmd) {
5850 		cmd->cmd_complete(cmd, mgmt_status(status));
5851 		mgmt_pending_remove(cmd);
5852 	}
5853 
5854 	hci_dev_unlock(hdev);
5855 }
5856 
5857 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5858 				    uint8_t *mgmt_status)
5859 {
5860 	switch (type) {
5861 	case DISCOV_TYPE_LE:
5862 		*mgmt_status = mgmt_le_support(hdev);
5863 		if (*mgmt_status)
5864 			return false;
5865 		break;
5866 	case DISCOV_TYPE_INTERLEAVED:
5867 		*mgmt_status = mgmt_le_support(hdev);
5868 		if (*mgmt_status)
5869 			return false;
5870 		fallthrough;
5871 	case DISCOV_TYPE_BREDR:
5872 		*mgmt_status = mgmt_bredr_support(hdev);
5873 		if (*mgmt_status)
5874 			return false;
5875 		break;
5876 	default:
5877 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5878 		return false;
5879 	}
5880 
5881 	return true;
5882 }
5883 
/* hci_cmd_sync completion for the Start Discovery command family.  Bails out
 * when @cmd is no longer the pending command (e.g. it was already handled);
 * otherwise responds with the request's type byte and advances the discovery
 * state machine.
 */
static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* cmd->param begins with the one-byte discovery type */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
				DISCOVERY_FINDING);
}
5902 
/* hci_cmd_sync callback: kick off the actual discovery procedure */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}
5907 
/* Shared implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY (@op selects which).  Validates controller
 * and discovery state, then queues the discovery start; the reply is sent
 * from start_discovery_complete().
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery may run at a time, and never alongside a
	 * periodic inquiry.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5978 
5979 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5980 			   void *data, u16 len)
5981 {
5982 	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5983 					data, len);
5984 }
5985 
5986 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5987 				   void *data, u16 len)
5988 {
5989 	return start_discovery_internal(sk, hdev,
5990 					MGMT_OP_START_LIMITED_DISCOVERY,
5991 					data, len);
5992 }
5993 
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like regular discovery but with
 * result filtering by RSSI and/or a list of 128-bit service UUIDs appended
 * to the request.  The reply is sent from start_discovery_complete().
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Cap so that sizeof(*cp) + uuid_count * 16 cannot overflow u16 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery may run at a time, and never alongside a
	 * periodic inquiry.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* Request length must match the advertised UUID count exactly */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
6105 
6106 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6107 {
6108 	struct mgmt_pending_cmd *cmd;
6109 
6110 	bt_dev_dbg(hdev, "status %u", status);
6111 
6112 	hci_dev_lock(hdev);
6113 
6114 	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6115 	if (cmd) {
6116 		cmd->cmd_complete(cmd, mgmt_status(status));
6117 		mgmt_pending_remove(cmd);
6118 	}
6119 
6120 	hci_dev_unlock(hdev);
6121 }
6122 
/* hci_cmd_sync completion for MGMT_OP_STOP_DISCOVERY.  Bails out when @cmd
 * is no longer the pending command; otherwise responds with the request's
 * type byte and marks discovery stopped on success.
 */
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* cmd->param begins with the one-byte discovery type */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
6139 
/* hci_cmd_sync callback: terminate the ongoing discovery procedure */
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_stop_discovery_sync(hdev);
}
6144 
/* MGMT_OP_STOP_DISCOVERY handler: the requested type must match the type of
 * the discovery currently running.  Queues the stop; the reply is sent from
 * stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* Must stop the same kind of discovery that was started */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6189 
/* MGMT_OP_CONFIRM_NAME handler: userspace tells us whether the name of a
 * device found during discovery is already known.  Devices with unknown
 * names are queued for remote name resolution; known ones are dropped from
 * the resolve list.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only meaningful while a discovery is in progress */
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		/* Name already known: no remote name request needed */
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
6231 
6232 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6233 			u16 len)
6234 {
6235 	struct mgmt_cp_block_device *cp = data;
6236 	u8 status;
6237 	int err;
6238 
6239 	bt_dev_dbg(hdev, "sock %p", sk);
6240 
6241 	if (!bdaddr_type_is_valid(cp->addr.type))
6242 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6243 					 MGMT_STATUS_INVALID_PARAMS,
6244 					 &cp->addr, sizeof(cp->addr));
6245 
6246 	hci_dev_lock(hdev);
6247 
6248 	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6249 				  cp->addr.type);
6250 	if (err < 0) {
6251 		status = MGMT_STATUS_FAILED;
6252 		goto done;
6253 	}
6254 
6255 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6256 		   sk);
6257 	status = MGMT_STATUS_SUCCESS;
6258 
6259 done:
6260 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6261 				&cp->addr, sizeof(cp->addr));
6262 
6263 	hci_dev_unlock(hdev);
6264 
6265 	return err;
6266 }
6267 
6268 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6269 			  u16 len)
6270 {
6271 	struct mgmt_cp_unblock_device *cp = data;
6272 	u8 status;
6273 	int err;
6274 
6275 	bt_dev_dbg(hdev, "sock %p", sk);
6276 
6277 	if (!bdaddr_type_is_valid(cp->addr.type))
6278 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6279 					 MGMT_STATUS_INVALID_PARAMS,
6280 					 &cp->addr, sizeof(cp->addr));
6281 
6282 	hci_dev_lock(hdev);
6283 
6284 	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6285 				  cp->addr.type);
6286 	if (err < 0) {
6287 		status = MGMT_STATUS_INVALID_PARAMS;
6288 		goto done;
6289 	}
6290 
6291 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6292 		   sk);
6293 	status = MGMT_STATUS_SUCCESS;
6294 
6295 done:
6296 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6297 				&cp->addr, sizeof(cp->addr));
6298 
6299 	hci_dev_unlock(hdev);
6300 
6301 	return err;
6302 }
6303 
/* hci_cmd_sync callback: the Device ID record is carried in the EIR data,
 * so regenerate it after the ID values change.
 */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}
6308 
/* MGMT_OP_SET_DEVICE_ID handler: stores the Device ID (DI) record values and
 * schedules an EIR update so the record is exposed.  Source values above
 * 0x0002 are rejected (presumably 0x0000 disables the record and
 * 0x0001/0x0002 select the assigning body -- see mgmt-api docs to confirm).
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	int err;
	__u16 source;

	bt_dev_dbg(hdev, "sock %p", sk);

	source = __le16_to_cpu(cp->source);

	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	/* Best-effort EIR refresh; queueing errors are intentionally ignored */
	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);

	hci_dev_unlock(hdev);

	return err;
}
6340 
/* Log the outcome of re-enabling an advertising instance */
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (!err) {
		bt_dev_dbg(hdev, "status %d", err);
		return;
	}

	bt_dev_err(hdev, "failed to re-configure advertising %d", err);
}
6348 
/* hci_cmd_sync completion for MGMT_OP_SET_ADVERTISING: syncs the
 * HCI_ADVERTISING flag with the controller's LE advertising state, answers
 * all pending Set Advertising commands, emits New Settings, and re-enables
 * multi-instance advertising if the mode was just turned off while
 * advertising instances still exist.
 */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		/* Fail every pending Set Advertising command */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		/* No current instance: fall back to the first configured one */
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}
6396 
/* hci_cmd_sync callback for MGMT_OP_SET_ADVERTISING: program the
 * controller with the requested advertising state. A value of 0x02
 * requests connectable advertising.
 */
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	/* Stop any pending instance timeout before reprogramming */
	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			/* Legacy advertising: push data first, then enable */
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}
6430 
/* MGMT_OP_SET_ADVERTISING handler: validate the request, either toggle
 * the advertising flags directly (when no HCI traffic is needed) or
 * queue set_adv_sync to reprogram the controller.
 *
 * Returns 0 or a negative errno; the mgmt response is sent separately.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	/* 0x00 = off, 0x01 = on, 0x02 = connectable advertising */
	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_dev_test_flag(hdev, HCI_MESH) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Broadcast New Settings only if a flag actually changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Reject while another Set Advertising or Set LE is in flight */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6515 
/* MGMT_OP_SET_STATIC_ADDRESS handler: store a static random address to
 * use while powered off. Rejected on powered or non-LE controllers;
 * BDADDR_ANY clears the address.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* The address can only be changed while the controller is down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6559 
/* MGMT_OP_SET_SCAN_PARAMS handler: validate and store the LE scan
 * interval/window (range 0x0004-0x4000, window <= interval) and restart
 * background scanning so the new values take effect.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The scan window may never exceed the scan interval */
	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED)
		hci_update_passive_scan(hdev);

	hci_dev_unlock(hdev);

	return err;
}
6608 
/* Completion callback for MGMT_OP_SET_FAST_CONNECTABLE: update the
 * HCI_FAST_CONNECTABLE flag on success and answer the originating
 * socket, then free the pending command.
 */
static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(err));
	} else {
		struct mgmt_mode *cp = cmd->param;

		/* Only flip the flag once the controller accepted the change */
		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}
6632 
6633 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6634 {
6635 	struct mgmt_pending_cmd *cmd = data;
6636 	struct mgmt_mode *cp = cmd->param;
6637 
6638 	return hci_write_fast_connectable_sync(hdev, cp->val);
6639 }
6640 
/* MGMT_OP_SET_FAST_CONNECTABLE handler: requires BR/EDR enabled and a
 * controller of at least Bluetooth 1.2. When powered off only the flag
 * is toggled; otherwise write_fast_connectable_sync is queued.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No state change needed: just confirm the current settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* Powered off: toggle the flag without touching the controller */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6696 
/* Completion callback for MGMT_OP_SET_BREDR: on failure roll back the
 * HCI_BREDR_ENABLED flag that set_bredr() pre-set, otherwise confirm
 * the new settings to the caller.
 */
static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}
6719 
6720 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6721 {
6722 	int status;
6723 
6724 	status = hci_write_fast_connectable_sync(hdev, false);
6725 
6726 	if (!status)
6727 		status = hci_update_scan_sync(hdev);
6728 
6729 	/* Since only the advertising data flags will change, there
6730 	 * is no need to update the scan response data.
6731 	 */
6732 	if (!status)
6733 		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6734 
6735 	return status;
6736 }
6737 
/* MGMT_OP_SET_BREDR handler: enable or disable BR/EDR support on a
 * dual-mode controller. Disabling is only allowed while powered off;
 * re-enabling is rejected when a static address or secure connections
 * would make the resulting configuration invalid.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Already in the requested state: just confirm the settings */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also drops all flags that only make
		 * sense with BR/EDR enabled.
		 */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6838 
/* Completion callback for MGMT_OP_SET_SECURE_CONN: translate the
 * requested value (0x00 off, 0x01 on, 0x02 SC-only) into the
 * HCI_SC_ENABLED/HCI_SC_ONLY flags and answer the caller.
 */
static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
}
6876 
6877 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6878 {
6879 	struct mgmt_pending_cmd *cmd = data;
6880 	struct mgmt_mode *cp = cmd->param;
6881 	u8 val = !!cp->val;
6882 
6883 	/* Force write of val */
6884 	hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6885 
6886 	return hci_write_sc_support_sync(hdev, val);
6887 }
6888 
/* MGMT_OP_SET_SECURE_CONN handler: enable/disable Secure Connections
 * (0x02 = SC-only mode). Toggles flags directly when the controller is
 * off or not SC-capable over BR/EDR, otherwise queues the HCI write.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On an SC-capable BR/EDR controller, SSP must be enabled first */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No HCI traffic needed: just adjust the flags and reply */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* Already in the requested state: confirm without HCI traffic */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6969 
/* MGMT_OP_SET_DEBUG_KEYS handler: 0x00 disables, 0x01 keeps debug
 * keys, 0x02 additionally enables SSP debug mode on the controller.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Push the SSP debug mode change to the controller only when it
	 * is powered, the mode actually changed and SSP is enabled.
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7016 
/* MGMT_OP_SET_PRIVACY handler: store the IRK and enable LE privacy
 * (0x02 = limited privacy). Only allowed while the controller is
 * powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a fresh RPA for the new IRK */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7073 
7074 static bool irk_is_valid(struct mgmt_irk_info *irk)
7075 {
7076 	switch (irk->addr.type) {
7077 	case BDADDR_LE_PUBLIC:
7078 		return true;
7079 
7080 	case BDADDR_LE_RANDOM:
7081 		/* Two most significant bits shall be set */
7082 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7083 			return false;
7084 		return true;
7085 	}
7086 
7087 	return false;
7088 }
7089 
/* MGMT_OP_LOAD_IRKS handler: replace the stored IRK list with the one
 * supplied by user space. The whole request is validated (count, total
 * length, per-entry address) before any existing IRKs are cleared.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound on irk_count so struct_size() below cannot
	 * exceed the u16 message length.
	 */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared entry count exactly */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate every entry before modifying any state */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		/* Administratively blocked keys are skipped, not failed */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7160 
7161 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7162 {
7163 	if (key->initiator != 0x00 && key->initiator != 0x01)
7164 		return false;
7165 
7166 	switch (key->addr.type) {
7167 	case BDADDR_LE_PUBLIC:
7168 		return true;
7169 
7170 	case BDADDR_LE_RANDOM:
7171 		/* Two most significant bits shall be set */
7172 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7173 			return false;
7174 		return true;
7175 	}
7176 
7177 	return false;
7178 }
7179 
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: replace the stored LTK list with
 * the one supplied by user space. The request is fully validated before
 * the existing keys are cleared.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound on key_count so struct_size() below cannot
	 * exceed the u16 message length.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared entry count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Validate every entry before modifying any state */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		/* Administratively blocked keys are skipped, not failed */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* Debug keys deliberately fall through to the
			 * default case so they are never stored.
			 */
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7275 
/* Completion callback for MGMT_OP_GET_CONN_INFO: report the RSSI and
 * TX power values cached on the connection, or invalid markers when
 * the HCI reads failed.
 */
static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct mgmt_rp_get_conn_info rp;
	u8 status;

	bt_dev_dbg(hdev, "err %d", err);

	memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr));

	status = mgmt_status(err);
	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		/* Report explicit "invalid" markers rather than stale data */
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
			  &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}
7304 
/* hci_cmd_sync callback for MGMT_OP_GET_CONN_INFO: refresh RSSI and,
 * where still unknown, TX power values for the connection. Returns a
 * mgmt status or negative errno consumed by get_conn_info_complete.
 */
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16   handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* Stash the connection for the completion callback */
	cmd->user_data = conn;
	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}
7342 
/* MGMT_OP_GET_CONN_INFO handler: return cached RSSI/TX power for the
 * connection, or queue get_conn_info_sync to refresh them when the
 * cache has aged out.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Look the connection up on the transport the address refers to */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
						 hdev->conn_info_max_age - 1);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd) {
			err = -ENOMEM;
		} else {
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);
		}

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7433 
/* Completion callback for MGMT_OP_GET_CLOCK_INFO: report the local
 * clock and, when the connection lookup succeeded, the piconet clock
 * and its accuracy.
 */
static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct mgmt_rp_get_clock_info rp;
	struct hci_conn *conn = cmd->user_data;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* On failure respond with zeroed clock values */
	if (err)
		goto complete;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
			  sizeof(rp));

	mgmt_pending_free(cmd);
}
7464 
7465 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
7466 {
7467 	struct mgmt_pending_cmd *cmd = data;
7468 	struct mgmt_cp_get_clock_info *cp = cmd->param;
7469 	struct hci_cp_read_clock hci_cp;
7470 	struct hci_conn *conn;
7471 
7472 	memset(&hci_cp, 0, sizeof(hci_cp));
7473 	hci_read_clock_sync(hdev, &hci_cp);
7474 
7475 	/* Make sure connection still exists */
7476 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7477 	if (!conn || conn->state != BT_CONNECTED)
7478 		return MGMT_STATUS_NOT_CONNECTED;
7479 
7480 	cmd->user_data = conn;
7481 	hci_cp.handle = cpu_to_le16(conn->handle);
7482 	hci_cp.which = 0x01; /* Piconet clock */
7483 
7484 	return hci_read_clock_sync(hdev, &hci_cp);
7485 }
7486 
/* MGMT_OP_GET_CLOCK_INFO handler: report the local Bluetooth clock and,
 * when the request names a connected BR/EDR peer, that connection's
 * piconet clock.  The HCI exchange itself runs asynchronously through
 * hci_cmd_sync_queue() and the reply is sent by get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
								u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Every reply, including error replies, echoes the address asked for */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is only defined for the BR/EDR transport */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-zero address selects a peer's piconet clock; BDADDR_ANY
	 * requests only the local clock.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	/* On failure to queue, reply immediately and drop the pending cmd */
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);
	}


unlock:
	hci_dev_unlock(hdev);
	return err;
}
7550 
7551 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7552 {
7553 	struct hci_conn *conn;
7554 
7555 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7556 	if (!conn)
7557 		return false;
7558 
7559 	if (conn->dst_type != type)
7560 		return false;
7561 
7562 	if (conn->state != BT_CONNECTED)
7563 		return false;
7564 
7565 	return true;
7566 }
7567 
7568 /* This function requires the caller holds hdev->lock */
7569 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7570 			       u8 addr_type, u8 auto_connect)
7571 {
7572 	struct hci_conn_params *params;
7573 
7574 	params = hci_conn_params_add(hdev, addr, addr_type);
7575 	if (!params)
7576 		return -EIO;
7577 
7578 	if (params->auto_connect == auto_connect)
7579 		return 0;
7580 
7581 	list_del_init(&params->action);
7582 
7583 	switch (auto_connect) {
7584 	case HCI_AUTO_CONN_DISABLED:
7585 	case HCI_AUTO_CONN_LINK_LOSS:
7586 		/* If auto connect is being disabled when we're trying to
7587 		 * connect to device, keep connecting.
7588 		 */
7589 		if (params->explicit_connect)
7590 			list_add(&params->action, &hdev->pend_le_conns);
7591 		break;
7592 	case HCI_AUTO_CONN_REPORT:
7593 		if (params->explicit_connect)
7594 			list_add(&params->action, &hdev->pend_le_conns);
7595 		else
7596 			list_add(&params->action, &hdev->pend_le_reports);
7597 		break;
7598 	case HCI_AUTO_CONN_DIRECT:
7599 	case HCI_AUTO_CONN_ALWAYS:
7600 		if (!is_connected(hdev, addr, addr_type))
7601 			list_add(&params->action, &hdev->pend_le_conns);
7602 		break;
7603 	}
7604 
7605 	params->auto_connect = auto_connect;
7606 
7607 	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7608 		   addr, addr_type, auto_connect);
7609 
7610 	return 0;
7611 }
7612 
7613 static void device_added(struct sock *sk, struct hci_dev *hdev,
7614 			 bdaddr_t *bdaddr, u8 type, u8 action)
7615 {
7616 	struct mgmt_ev_device_added ev;
7617 
7618 	bacpy(&ev.addr.bdaddr, bdaddr);
7619 	ev.addr.type = type;
7620 	ev.action = action;
7621 
7622 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7623 }
7624 
/* hci_cmd_sync callback for Add Device: refresh the passive scan so the
 * newly added entry takes effect.
 */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7629 
/* MGMT_OP_ADD_DEVICE handler.
 *
 * For BR/EDR addresses only action 0x01 (allow incoming connection) is
 * supported and the device is put on the accept list.  For LE addresses
 * the action selects the auto-connect policy (0x00 report, 0x01 direct,
 * 0x02 always) stored in the connection parameters, after which the
 * passive scan is refreshed asynchronously.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Reject invalid address types and the wildcard address */
	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	/* Only actions 0x00-0x02 are defined */
	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		/* Page scan may need to be enabled for the new entry */
		hci_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action onto the internal auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->flags;
	}

	/* Refresh the passive scan asynchronously */
	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	supported_flags = hdev->conn_flags;
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7731 
7732 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7733 			   bdaddr_t *bdaddr, u8 type)
7734 {
7735 	struct mgmt_ev_device_removed ev;
7736 
7737 	bacpy(&ev.addr.bdaddr, bdaddr);
7738 	ev.addr.type = type;
7739 
7740 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7741 }
7742 
/* hci_cmd_sync callback for Remove Device: refresh the passive scan so
 * the removed entry no longer influences scanning.
 */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7747 
/* MGMT_OP_REMOVE_DEVICE handler.
 *
 * A specific address removes that single device: a BR/EDR address from
 * the accept list, an LE identity address from the connection parameters
 * (unless the entry was only added implicitly as DISABLED/EXPLICIT).
 * The wildcard BDADDR_ANY with type 0 clears the whole accept list and
 * all non-disabled LE connection parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			/* Page scan may be disabled if the list became empty */
			hci_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* DISABLED/EXPLICIT entries were never added via Add Device,
		 * so they are not removable through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* Wildcard removal is only valid with address type 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries with an ongoing explicit connect,
			 * downgraded to EXPLICIT-only.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	/* Refresh the passive scan asynchronously; best effort */
	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
7875 
/* MGMT_OP_LOAD_CONN_PARAM handler: replace the stored LE connection
 * parameters with the supplied list.  Entries with an invalid address
 * type or out-of-range interval/latency/timeout values are skipped with
 * an error log rather than failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound that keeps the expected_len computation below within
	 * the u16 message-length domain.
	 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared parameter count exactly */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	/* Loading replaces everything not actively in use */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
7960 
/* MGMT_OP_SET_EXTERNAL_CONFIG handler: mark whether the controller has
 * been configured by external means.  Only valid while powered off and on
 * controllers with the EXTERNAL_CONFIG quirk.  Toggling the flag may move
 * the controller between the configured and unconfigured index lists.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Track whether the flag actually flipped */
	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the configured state changed, re-register the index under the
	 * matching (configured vs unconfigured) category.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
8016 
/* MGMT_OP_SET_PUBLIC_ADDRESS handler: store the public address that will
 * be programmed into the controller via the driver's set_bdaddr callback.
 * Only valid while powered off and when the driver supports set_bdaddr.
 * If the device thereby becomes fully configured, re-register it as a
 * configured index and power it up for initialization.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* The all-zeroes address is not a valid public address */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
8068 
/* Completion callback for MGMT_OP_READ_LOCAL_OOB_EXT_DATA (BR/EDR case):
 * translate the HCI Read Local OOB (Extended) Data response into the
 * mgmt reply's EIR-formatted hash/randomizer fields and, on success,
 * broadcast the updated OOB data to interested sockets.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	/* Bail out if the pending command was already taken over/cancelled */
	if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
		return;

	/* Derive a status from the skb when the sync request itself
	 * succeeded: no skb or an error pointer means failure; otherwise
	 * the first byte is the HCI command status.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		/* Legacy pairing only: the controller returned P-192 values */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* class-of-device (5) + hash TLV (18) + rand TLV (18) */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Secure Connections: P-256 values, plus P-192 unless SC-only */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Notify everyone else that has opted into OOB data events */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
8191 
8192 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8193 				  struct mgmt_cp_read_local_oob_ext_data *cp)
8194 {
8195 	struct mgmt_pending_cmd *cmd;
8196 	int err;
8197 
8198 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8199 			       cp, sizeof(*cp));
8200 	if (!cmd)
8201 		return -ENOMEM;
8202 
8203 	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8204 				 read_local_oob_ext_data_complete);
8205 
8206 	if (err < 0) {
8207 		mgmt_pending_remove(cmd);
8208 		return err;
8209 	}
8210 
8211 	return 0;
8212 }
8213 
/* MGMT_OP_READ_LOCAL_OOB_EXT_DATA handler.
 *
 * For BR/EDR with SSP enabled the OOB values come from the controller via
 * read_local_ssp_oob_req() and the reply is sent asynchronously; in every
 * other supported case the EIR-formatted reply is built inline here.  The
 * eir_len values chosen up front are upper bounds used to size the reply
 * allocation.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pick a status and a worst-case eir_len for the allocation below */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Now build the actual EIR payload; eir_len restarts from zero */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* OOB values must come from the controller; reply is
			 * completed asynchronously on success.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick the address the peer will actually see: static random
		 * (marked with trailing 0x01) or the public address (0x00).
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* Role: 0x02 peripheral-preferred when advertising, else
		 * 0x01 central-preferred.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
8374 
8375 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8376 {
8377 	u32 flags = 0;
8378 
8379 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
8380 	flags |= MGMT_ADV_FLAG_DISCOV;
8381 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8382 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8383 	flags |= MGMT_ADV_FLAG_APPEARANCE;
8384 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8385 	flags |= MGMT_ADV_PARAM_DURATION;
8386 	flags |= MGMT_ADV_PARAM_TIMEOUT;
8387 	flags |= MGMT_ADV_PARAM_INTERVALS;
8388 	flags |= MGMT_ADV_PARAM_TX_POWER;
8389 	flags |= MGMT_ADV_PARAM_SCAN_RSP;
8390 
8391 	/* In extended adv TX_POWER returned from Set Adv Param
8392 	 * will be always valid.
8393 	 */
8394 	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8395 		flags |= MGMT_ADV_FLAG_TX_POWER;
8396 
8397 	if (ext_adv_capable(hdev)) {
8398 		flags |= MGMT_ADV_FLAG_SEC_1M;
8399 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8400 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8401 
8402 		if (hdev->le_features[1] & HCI_LE_PHY_2M)
8403 			flags |= MGMT_ADV_FLAG_SEC_2M;
8404 
8405 		if (hdev->le_features[1] & HCI_LE_PHY_CODED)
8406 			flags |= MGMT_ADV_FLAG_SEC_CODED;
8407 	}
8408 
8409 	return flags;
8410 }
8411 
/* MGMT_OP_READ_ADV_FEATURES handler: report the supported advertising
 * flags, data-length limits, and the list of existing advertising
 * instance identifiers.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One trailing byte per advertising instance identifier */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		/* Only instances 1-le_num_of_adv_sets are externally visible */
		/* NOTE(review): the comparison below uses adv_instance_cnt,
		 * not le_num_of_adv_sets as the comment says — confirm which
		 * bound is intended.
		 */
		if (adv_instance->instance <= hdev->adv_instance_cnt) {
			*instance = adv_instance->instance;
			instance++;
		} else {
			/* Hidden instance: shrink the reply accordingly */
			rp->num_instances--;
			rp_len--;
		}
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
8466 
/* Return the number of bytes the local-name EIR field would occupy, by
 * rendering it into a scratch buffer sized for the largest short name TLV.
 */
static u8 calculate_name_len(struct hci_dev *hdev)
{
	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];

	return eir_append_local_name(hdev, buf, 0);
}
8473 
8474 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8475 			   bool is_adv_data)
8476 {
8477 	u8 max_len = HCI_MAX_AD_LENGTH;
8478 
8479 	if (is_adv_data) {
8480 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8481 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
8482 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
8483 			max_len -= 3;
8484 
8485 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8486 			max_len -= 3;
8487 	} else {
8488 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8489 			max_len -= calculate_name_len(hdev);
8490 
8491 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8492 			max_len -= 4;
8493 	}
8494 
8495 	return max_len;
8496 }
8497 
8498 static bool flags_managed(u32 adv_flags)
8499 {
8500 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8501 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
8502 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
8503 }
8504 
8505 static bool tx_power_managed(u32 adv_flags)
8506 {
8507 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8508 }
8509 
8510 static bool name_managed(u32 adv_flags)
8511 {
8512 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8513 }
8514 
8515 static bool appearance_managed(u32 adv_flags)
8516 {
8517 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8518 }
8519 
8520 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8521 			      u8 len, bool is_adv_data)
8522 {
8523 	int i, cur_len;
8524 	u8 max_len;
8525 
8526 	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8527 
8528 	if (len > max_len)
8529 		return false;
8530 
8531 	/* Make sure that the data is correctly formatted. */
8532 	for (i = 0; i < len; i += (cur_len + 1)) {
8533 		cur_len = data[i];
8534 
8535 		if (!cur_len)
8536 			continue;
8537 
8538 		if (data[i + 1] == EIR_FLAGS &&
8539 		    (!is_adv_data || flags_managed(adv_flags)))
8540 			return false;
8541 
8542 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8543 			return false;
8544 
8545 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8546 			return false;
8547 
8548 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8549 			return false;
8550 
8551 		if (data[i + 1] == EIR_APPEARANCE &&
8552 		    appearance_managed(adv_flags))
8553 			return false;
8554 
8555 		/* If the current field length would exceed the total data
8556 		 * length, then it's invalid.
8557 		 */
8558 		if (i + cur_len >= len)
8559 			return false;
8560 	}
8561 
8562 	return true;
8563 }
8564 
8565 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8566 {
8567 	u32 supported_flags, phy_flags;
8568 
8569 	/* The current implementation only supports a subset of the specified
8570 	 * flags. Also need to check mutual exclusiveness of sec flags.
8571 	 */
8572 	supported_flags = get_supported_adv_flags(hdev);
8573 	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8574 	if (adv_flags & ~supported_flags ||
8575 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8576 		return false;
8577 
8578 	return true;
8579 }
8580 
8581 static bool adv_busy(struct hci_dev *hdev)
8582 {
8583 	return pending_find(MGMT_OP_SET_LE, hdev);
8584 }
8585 
/* Common completion step for instance registration: on success clear
 * the "pending" marker of newly created instances; on failure remove
 * every still-pending instance and tell userspace it is gone.
 *
 * NOTE(review): the instance parameter is unused here - the loop-local
 * variable of the same name shadows it.
 */
static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
			     int err)
{
	struct adv_info *adv, *n;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	/* _safe iteration: hci_remove_adv_instance() unlinks entries */
	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance;

		/* Instances that pre-existed the request are untouched */
		if (!adv->pending)
			continue;

		if (!err) {
			/* Request succeeded: the instance is now live */
			adv->pending = false;
			continue;
		}

		instance = adv->instance;

		/* Stop the rotation timer before removing its instance */
		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(sk, hdev, instance);
	}

	hci_dev_unlock(hdev);
}
8617 
8618 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8619 {
8620 	struct mgmt_pending_cmd *cmd = data;
8621 	struct mgmt_cp_add_advertising *cp = cmd->param;
8622 	struct mgmt_rp_add_advertising rp;
8623 
8624 	memset(&rp, 0, sizeof(rp));
8625 
8626 	rp.instance = cp->instance;
8627 
8628 	if (err)
8629 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8630 				mgmt_status(err));
8631 	else
8632 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8633 				  mgmt_status(err), &rp, sizeof(rp));
8634 
8635 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8636 
8637 	mgmt_pending_free(cmd);
8638 }
8639 
8640 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8641 {
8642 	struct mgmt_pending_cmd *cmd = data;
8643 	struct mgmt_cp_add_advertising *cp = cmd->param;
8644 
8645 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8646 }
8647 
/* MGMT_OP_ADD_ADVERTISING handler: register (or overwrite) advertising
 * instance cp->instance with the supplied flags, advertising data and
 * scan response data, then start advertising it when possible.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *adv, *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Valid instance identifiers are 1..le_num_of_adv_sets */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The variable-length payload must match the declared lengths */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs a running clock, i.e. a powered controller */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Adv data and scan rsp data are concatenated in cp->data */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	adv = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval, 0);
	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Queue the instance that actually needs scheduling; the reply is
	 * sent from add_advertising_complete() once the request has run.
	 */
	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8782 
/* Completion callback for the queued Add Extended Advertising
 * Parameters request: report TX power and remaining data space on
 * success, or remove the instance and report the failure.
 */
static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Instance may already be gone (e.g. removed meanwhile) */
	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	/* NOTE(review): cmd comes from the queued request and looks like it
	 * can never be NULL here, so this guard appears redundant - confirm
	 * against hci_cmd_sync_queue() semantics before simplifying.
	 */
	if (cmd)
		mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}
8833 
8834 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8835 {
8836 	struct mgmt_pending_cmd *cmd = data;
8837 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8838 
8839 	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8840 }
8841 
/* MGMT_OP_ADD_EXT_ADV_PARAMS handler: create advertising instance
 * cp->instance with the requested parameters but no data yet; the data
 * is supplied afterwards via MGMT_OP_ADD_EXT_ADV_DATA.
 */
static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	/* Valid instance identifiers are 1..le_num_of_adv_sets */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined. Any
	 * extra parameters we don't know about will be ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
				   timeout, duration, tx_power, min_interval,
				   max_interval, 0);

	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			/* Drop the instance created above; it never became
			 * visible to userspace.
			 */
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		/* Legacy advertising: nothing to program on the controller
		 * yet, so reply with the defaults right away.
		 */
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8957 
8958 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8959 {
8960 	struct mgmt_pending_cmd *cmd = data;
8961 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8962 	struct mgmt_rp_add_advertising rp;
8963 
8964 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8965 
8966 	memset(&rp, 0, sizeof(rp));
8967 
8968 	rp.instance = cp->instance;
8969 
8970 	if (err)
8971 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8972 				mgmt_status(err));
8973 	else
8974 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8975 				  mgmt_status(err), &rp, sizeof(rp));
8976 
8977 	mgmt_pending_free(cmd);
8978 }
8979 
8980 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8981 {
8982 	struct mgmt_pending_cmd *cmd = data;
8983 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8984 	int err;
8985 
8986 	if (ext_adv_capable(hdev)) {
8987 		err = hci_update_adv_data_sync(hdev, cp->instance);
8988 		if (err)
8989 			return err;
8990 
8991 		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8992 		if (err)
8993 			return err;
8994 
8995 		return hci_enable_ext_advertising_sync(hdev, cp->instance);
8996 	}
8997 
8998 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8999 }
9000 
/* MGMT_OP_ADD_EXT_ADV_DATA handler: attach advertising and scan
 * response data to an instance previously created with
 * MGMT_OP_ADD_EXT_ADV_PARAMS, then start advertising it if possible.
 * On any failure the (still pending) instance is removed again.
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The params stage must have created the instance already */
	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		/* The instance is complete now; announce it if it is new */
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9119 
9120 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9121 					int err)
9122 {
9123 	struct mgmt_pending_cmd *cmd = data;
9124 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9125 	struct mgmt_rp_remove_advertising rp;
9126 
9127 	bt_dev_dbg(hdev, "err %d", err);
9128 
9129 	memset(&rp, 0, sizeof(rp));
9130 	rp.instance = cp->instance;
9131 
9132 	if (err)
9133 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9134 				mgmt_status(err));
9135 	else
9136 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9137 				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9138 
9139 	mgmt_pending_free(cmd);
9140 }
9141 
9142 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9143 {
9144 	struct mgmt_pending_cmd *cmd = data;
9145 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9146 	int err;
9147 
9148 	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9149 	if (err)
9150 		return err;
9151 
9152 	if (list_empty(&hdev->adv_instances))
9153 		err = hci_disable_advertising_sync(hdev);
9154 
9155 	return err;
9156 }
9157 
/* MGMT_OP_REMOVE_ADVERTISING handler: remove advertising instance
 * cp->instance, or all instances when cp->instance is 0.
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* A non-zero instance must refer to an existing one */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Refuse while a Set LE command is still pending */
	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to remove if no instances are registered at all */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Reply is sent from remove_advertising_complete() */
	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9205 
9206 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9207 			     void *data, u16 data_len)
9208 {
9209 	struct mgmt_cp_get_adv_size_info *cp = data;
9210 	struct mgmt_rp_get_adv_size_info rp;
9211 	u32 flags, supported_flags;
9212 
9213 	bt_dev_dbg(hdev, "sock %p", sk);
9214 
9215 	if (!lmp_le_capable(hdev))
9216 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9217 				       MGMT_STATUS_REJECTED);
9218 
9219 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9220 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9221 				       MGMT_STATUS_INVALID_PARAMS);
9222 
9223 	flags = __le32_to_cpu(cp->flags);
9224 
9225 	/* The current implementation only supports a subset of the specified
9226 	 * flags.
9227 	 */
9228 	supported_flags = get_supported_adv_flags(hdev);
9229 	if (flags & ~supported_flags)
9230 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9231 				       MGMT_STATUS_INVALID_PARAMS);
9232 
9233 	rp.instance = cp->instance;
9234 	rp.flags = cp->flags;
9235 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9236 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9237 
9238 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9239 				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9240 }
9241 
/* Dispatch table for MGMT opcodes: the entry at index N handles opcode
 * N. The second field is the fixed (or, with HCI_MGMT_VAR_LEN, the
 * minimum) parameter size; the third carries handler flags such as
 * HCI_MGMT_NO_HDEV (no controller index required), HCI_MGMT_UNTRUSTED
 * (callable by untrusted sockets) and HCI_MGMT_UNCONFIGURED (allowed
 * on unconfigured controllers).
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE },
	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send,               MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
};
9374 
/* Announce a newly registered controller to MGMT listeners. Emits the
 * legacy Index Added (or Unconf Index Added) event plus the Extended
 * Index Added event, whose type encodes the controller state:
 * 0x00 configured primary, 0x01 unconfigured primary, 0x02 AMP.
 */
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	/* Raw devices are not exposed via the management interface */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		/* AMP controllers only get the extended event */
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
9406 
/* Announce removal of a controller to MGMT listeners: fail all pending
 * commands with Invalid Index, emit the legacy and extended Index
 * Removed events (type: 0x00 configured, 0x01 unconfigured, 0x02 AMP)
 * and cancel any MGMT-related delayed work.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	/* Raw devices are not exposed via the management interface */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* Opcode 0 matches every pending command */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);

	/* Cancel any remaining timed work */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->service_cache);
	cancel_delayed_work_sync(&hdev->rpa_expired);
}
9448 
/* Called when a power-on attempt completes. On success, restore LE
 * actions and passive scanning; in all cases answer pending Set
 * Powered commands and emit New Settings.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	/* Respond to every pending Set Powered command */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* Drop the socket reference recorded in match, if any */
	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
9471 
/* Clean up mgmt state on power-off: answer all pending commands,
 * announce an all-zero class of device if needed, and emit New
 * Settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Only announce the zeroed class if it actually changed */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9505 
9506 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9507 {
9508 	struct mgmt_pending_cmd *cmd;
9509 	u8 status;
9510 
9511 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9512 	if (!cmd)
9513 		return;
9514 
9515 	if (err == -ERFKILL)
9516 		status = MGMT_STATUS_RFKILLED;
9517 	else
9518 		status = MGMT_STATUS_FAILED;
9519 
9520 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9521 
9522 	mgmt_pending_remove(cmd);
9523 }
9524 
9525 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
9526 		       bool persistent)
9527 {
9528 	struct mgmt_ev_new_link_key ev;
9529 
9530 	memset(&ev, 0, sizeof(ev));
9531 
9532 	ev.store_hint = persistent;
9533 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9534 	ev.key.addr.type = BDADDR_BREDR;
9535 	ev.key.type = key->type;
9536 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
9537 	ev.key.pin_len = key->pin_len;
9538 
9539 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
9540 }
9541 
9542 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9543 {
9544 	switch (ltk->type) {
9545 	case SMP_LTK:
9546 	case SMP_LTK_RESPONDER:
9547 		if (ltk->authenticated)
9548 			return MGMT_LTK_AUTHENTICATED;
9549 		return MGMT_LTK_UNAUTHENTICATED;
9550 	case SMP_LTK_P256:
9551 		if (ltk->authenticated)
9552 			return MGMT_LTK_P256_AUTH;
9553 		return MGMT_LTK_P256_UNAUTH;
9554 	case SMP_LTK_P256_DEBUG:
9555 		return MGMT_LTK_P256_DEBUG;
9556 	}
9557 
9558 	return MGMT_LTK_UNAUTHENTICATED;
9559 }
9560 
/* Emit MGMT_EV_NEW_LONG_TERM_KEY for a freshly distributed LTK. */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK is the initiator-distributed key */
	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
9603 
9604 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9605 {
9606 	struct mgmt_ev_new_irk ev;
9607 
9608 	memset(&ev, 0, sizeof(ev));
9609 
9610 	ev.store_hint = persistent;
9611 
9612 	bacpy(&ev.rpa, &irk->rpa);
9613 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9614 	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
9615 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9616 
9617 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9618 }
9619 
/* Emit MGMT_EV_NEW_CSRK for a freshly distributed signature
 * resolving key.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
9649 
9650 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9651 			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9652 			 u16 max_interval, u16 latency, u16 timeout)
9653 {
9654 	struct mgmt_ev_new_conn_param ev;
9655 
9656 	if (!hci_is_identity_address(bdaddr, bdaddr_type))
9657 		return;
9658 
9659 	memset(&ev, 0, sizeof(ev));
9660 	bacpy(&ev.addr.bdaddr, bdaddr);
9661 	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9662 	ev.store_hint = store_hint;
9663 	ev.min_interval = cpu_to_le16(min_interval);
9664 	ev.max_interval = cpu_to_le16(max_interval);
9665 	ev.latency = cpu_to_le16(latency);
9666 	ev.timeout = cpu_to_le16(timeout);
9667 
9668 	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9669 }
9670 
9671 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9672 			   u8 *name, u8 name_len)
9673 {
9674 	struct sk_buff *skb;
9675 	struct mgmt_ev_device_connected *ev;
9676 	u16 eir_len = 0;
9677 	u32 flags = 0;
9678 
9679 	/* allocate buff for LE or BR/EDR adv */
9680 	if (conn->le_adv_data_len > 0)
9681 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9682 				     sizeof(*ev) + conn->le_adv_data_len);
9683 	else
9684 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9685 				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
9686 				     eir_precalc_len(sizeof(conn->dev_class)));
9687 
9688 	ev = skb_put(skb, sizeof(*ev));
9689 	bacpy(&ev->addr.bdaddr, &conn->dst);
9690 	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9691 
9692 	if (conn->out)
9693 		flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9694 
9695 	ev->flags = __cpu_to_le32(flags);
9696 
9697 	/* We must ensure that the EIR Data fields are ordered and
9698 	 * unique. Keep it simple for now and avoid the problem by not
9699 	 * adding any BR/EDR data to the LE adv.
9700 	 */
9701 	if (conn->le_adv_data_len > 0) {
9702 		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9703 		eir_len = conn->le_adv_data_len;
9704 	} else {
9705 		if (name)
9706 			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9707 
9708 		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
9709 			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
9710 						    conn->dev_class, sizeof(conn->dev_class));
9711 	}
9712 
9713 	ev->eir_len = cpu_to_le16(eir_len);
9714 
9715 	mgmt_event_skb(skb, NULL);
9716 }
9717 
9718 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9719 {
9720 	struct sock **sk = data;
9721 
9722 	cmd->cmd_complete(cmd, 0);
9723 
9724 	*sk = cmd->sk;
9725 	sock_hold(*sk);
9726 
9727 	mgmt_pending_remove(cmd);
9728 }
9729 
/* Per-command callback: signal Device Unpaired for the address in
 * the pending Unpair Device command, then complete and remove it.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
9740 
9741 bool mgmt_powering_down(struct hci_dev *hdev)
9742 {
9743 	struct mgmt_pending_cmd *cmd;
9744 	struct mgmt_mode *cp;
9745 
9746 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9747 	if (!cmd)
9748 		return false;
9749 
9750 	cp = cmd->param;
9751 	if (!cp->val)
9752 		return true;
9753 
9754 	return false;
9755 }
9756 
/* Emit MGMT_EV_DEVICE_DISCONNECTED and complete any pending
 * Disconnect/Unpair Device commands for this connection. If a
 * power-down is pending and this was the last connection, schedule
 * the actual power off.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only report connections userspace was told about */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp stores the requester's socket in sk so the
	 * broadcast below can skip it.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
9796 
9797 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9798 			    u8 link_type, u8 addr_type, u8 status)
9799 {
9800 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9801 	struct mgmt_cp_disconnect *cp;
9802 	struct mgmt_pending_cmd *cmd;
9803 
9804 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9805 			     hdev);
9806 
9807 	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9808 	if (!cmd)
9809 		return;
9810 
9811 	cp = cmd->param;
9812 
9813 	if (bacmp(bdaddr, &cp->addr.bdaddr))
9814 		return;
9815 
9816 	if (cp->addr.type != bdaddr_type)
9817 		return;
9818 
9819 	cmd->cmd_complete(cmd, mgmt_status(status));
9820 	mgmt_pending_remove(cmd);
9821 }
9822 
/* Emit MGMT_EV_CONNECT_FAILED for a failed outgoing connection. If
 * a power-down is pending and this was the last tracked connection,
 * schedule the actual power off.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
9842 
9843 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9844 {
9845 	struct mgmt_ev_pin_code_request ev;
9846 
9847 	bacpy(&ev.addr.bdaddr, bdaddr);
9848 	ev.addr.type = BDADDR_BREDR;
9849 	ev.secure = secure;
9850 
9851 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9852 }
9853 
9854 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9855 				  u8 status)
9856 {
9857 	struct mgmt_pending_cmd *cmd;
9858 
9859 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9860 	if (!cmd)
9861 		return;
9862 
9863 	cmd->cmd_complete(cmd, mgmt_status(status));
9864 	mgmt_pending_remove(cmd);
9865 }
9866 
9867 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9868 				      u8 status)
9869 {
9870 	struct mgmt_pending_cmd *cmd;
9871 
9872 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9873 	if (!cmd)
9874 		return;
9875 
9876 	cmd->cmd_complete(cmd, mgmt_status(status));
9877 	mgmt_pending_remove(cmd);
9878 }
9879 
9880 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9881 			      u8 link_type, u8 addr_type, u32 value,
9882 			      u8 confirm_hint)
9883 {
9884 	struct mgmt_ev_user_confirm_request ev;
9885 
9886 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9887 
9888 	bacpy(&ev.addr.bdaddr, bdaddr);
9889 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9890 	ev.confirm_hint = confirm_hint;
9891 	ev.value = cpu_to_le32(value);
9892 
9893 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9894 			  NULL);
9895 }
9896 
9897 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9898 			      u8 link_type, u8 addr_type)
9899 {
9900 	struct mgmt_ev_user_passkey_request ev;
9901 
9902 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9903 
9904 	bacpy(&ev.addr.bdaddr, bdaddr);
9905 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9906 
9907 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9908 			  NULL);
9909 }
9910 
9911 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9912 				      u8 link_type, u8 addr_type, u8 status,
9913 				      u8 opcode)
9914 {
9915 	struct mgmt_pending_cmd *cmd;
9916 
9917 	cmd = pending_find(opcode, hdev);
9918 	if (!cmd)
9919 		return -ENOENT;
9920 
9921 	cmd->cmd_complete(cmd, mgmt_status(status));
9922 	mgmt_pending_remove(cmd);
9923 
9924 	return 0;
9925 }
9926 
/* Complete a pending User Confirm Reply with the controller status. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
9933 
/* Complete a pending User Confirm Negative Reply with the controller
 * status.
 */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
9941 
/* Complete a pending User Passkey Reply with the controller status. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
9948 
/* Complete a pending User Passkey Negative Reply with the controller
 * status.
 */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
9956 
9957 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9958 			     u8 link_type, u8 addr_type, u32 passkey,
9959 			     u8 entered)
9960 {
9961 	struct mgmt_ev_passkey_notify ev;
9962 
9963 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9964 
9965 	bacpy(&ev.addr.bdaddr, bdaddr);
9966 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9967 	ev.passkey = __cpu_to_le32(passkey);
9968 	ev.entered = entered;
9969 
9970 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9971 }
9972 
/* Emit MGMT_EV_AUTH_FAILED for a failed authentication and, if a
 * pairing command is pending for this connection, complete it with
 * the same status (skipping that socket for the broadcast).
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
9993 
/* Called when enabling/disabling link level authentication finished.
 * Sync HCI_LINK_SECURITY with the HCI_AUTH flag, answer pending Set
 * Link Security commands, and emit New Settings if the flag changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		/* Fail all pending Set Link Security commands */
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's HCI_AUTH state into the mgmt flag,
	 * recording whether it actually changed.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
10020 
10021 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
10022 {
10023 	struct cmd_lookup *match = data;
10024 
10025 	if (match->sk == NULL) {
10026 		match->sk = cmd->sk;
10027 		sock_hold(match->sk);
10028 	}
10029 }
10030 
/* Called when updating the class of device finished. On success the
 * new class is broadcast, skipping the socket of the command that
 * triggered the change (Set Dev Class, Add UUID or Remove UUID).
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Pick up the socket of whichever command is pending */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
10049 
/* Called when updating the local name finished. Broadcasts Local
 * Name Changed unless the update was part of powering on, skipping
 * the requester's socket if the change came via mgmt.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No mgmt command pending: the name changed on its own,
		 * so cache it on the hdev.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
10077 
10078 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
10079 {
10080 	int i;
10081 
10082 	for (i = 0; i < uuid_count; i++) {
10083 		if (!memcmp(uuid, uuids[i], 16))
10084 			return true;
10085 	}
10086 
10087 	return false;
10088 }
10089 
/* Walk the EIR/advertising data in @eir (@eir_len bytes) and return
 * true if any advertised service UUID (16-, 32- or 128-bit) matches
 * an entry in @uuids. 16- and 32-bit UUIDs are expanded against the
 * Bluetooth base UUID before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		/* Each EIR field is: length byte, type byte, data.
		 * The length byte covers the type byte plus the data.
		 */
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		/* Stop on a truncated field */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* Little-endian 16-bit UUIDs, 2 bytes each */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* Little-endian 32-bit UUIDs, 4 bytes each */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* Full 128-bit UUIDs, 16 bytes each */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
10144 
/* Schedule an LE scan restart (used with controllers that do strict
 * duplicate filtering) unless the current scan window is about to
 * end anyway.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Skip the restart if the scan would expire before the
	 * restart delay elapses.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
10159 
/* Apply the active service-discovery filter (RSSI threshold and UUID
 * list) to a found device. Returns true if the result should be
 * reported to userspace.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
10204 
10205 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
10206 				  bdaddr_t *bdaddr, u8 addr_type)
10207 {
10208 	struct mgmt_ev_adv_monitor_device_lost ev;
10209 
10210 	ev.monitor_handle = cpu_to_le16(handle);
10211 	bacpy(&ev.addr.bdaddr, bdaddr);
10212 	ev.addr.type = addr_type;
10213 
10214 	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
10215 		   NULL);
10216 }
10217 
/* Build and send MGMT_EV_ADV_MONITOR_DEVICE_FOUND from an existing
 * DEVICE_FOUND skb by prefixing the matched monitor @handle. The
 * input @skb is copied, not consumed.
 */
static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	/* Size of DEVICE_FOUND payload plus the extra monitor_handle */
	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}
10247 
/* Dispatch a DEVICE_FOUND skb, additionally notifying Advertisement
 * Monitor listeners. Consumes @skb (either forwards or frees it).
 */
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	/* Recomputed below while scanning the monitored device list */
	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			/* First report for this monitored device: emit the
			 * monitor event with the matched handle.
			 */
			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		/* Any device still unnotified keeps the pending flag set */
		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	/* Either forward the skb as DEVICE_FOUND or release it */
	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}
10311 
/* Emit MGMT_EV_MESH_DEVICE_FOUND for an LE advertisement, but only
 * if the advertisement (or scan response) carries one of the AD
 * types configured in hdev->mesh_ad_types. An empty configured list
 * accepts everything.
 */
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	/* No filter configured: accept every advertisement */
	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		/* eir[i] is the field length, eir[i + 1] the AD type */
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	/* No requested AD type present: drop silently */
	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}
10377 
/* Report a found device (inquiry result or LE advertisement) to
 * userspace, applying discovery filters, then dispatch via the
 * Advertisement Monitor path.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	/* Mesh mode additionally gets its own event for LE reports */
	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	/* Encode dev_class as an EIR CoD field unless one is already
	 * present in the EIR data.
	 */
	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	/* Hands off the skb (forwarded or freed there) */
	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}
10469 
10470 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10471 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
10472 {
10473 	struct sk_buff *skb;
10474 	struct mgmt_ev_device_found *ev;
10475 	u16 eir_len = 0;
10476 	u32 flags = 0;
10477 
10478 	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10479 			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
10480 
10481 	ev = skb_put(skb, sizeof(*ev));
10482 	bacpy(&ev->addr.bdaddr, bdaddr);
10483 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
10484 	ev->rssi = rssi;
10485 
10486 	if (name)
10487 		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
10488 	else
10489 		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
10490 
10491 	ev->eir_len = cpu_to_le16(eir_len);
10492 	ev->flags = cpu_to_le32(flags);
10493 
10494 	mgmt_event_skb(skb, NULL);
10495 }
10496 
10497 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
10498 {
10499 	struct mgmt_ev_discovering ev;
10500 
10501 	bt_dev_dbg(hdev, "discovering %u", discovering);
10502 
10503 	memset(&ev, 0, sizeof(ev));
10504 	ev.type = hdev->discovery.type;
10505 	ev.discovering = discovering;
10506 
10507 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
10508 }
10509 
10510 void mgmt_suspending(struct hci_dev *hdev, u8 state)
10511 {
10512 	struct mgmt_ev_controller_suspend ev;
10513 
10514 	ev.suspend_state = state;
10515 	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
10516 }
10517 
10518 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
10519 		   u8 addr_type)
10520 {
10521 	struct mgmt_ev_controller_resume ev;
10522 
10523 	ev.wake_reason = reason;
10524 	if (bdaddr) {
10525 		bacpy(&ev.addr.bdaddr, bdaddr);
10526 		ev.addr.type = addr_type;
10527 	} else {
10528 		memset(&ev.addr, 0, sizeof(ev.addr));
10529 	}
10530 
10531 	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
10532 }
10533 
/* Management interface channel registered with the HCI socket layer;
 * routes HCI_CHANNEL_CONTROL commands to the mgmt_handlers table.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
10540 
/* Register the management control channel. Returns 0 on success or a
 * negative error code from hci_mgmt_chan_register().
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
10545 
/* Unregister the management control channel registered by mgmt_init() */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
10550 
10551 void mgmt_cleanup(struct sock *sk)
10552 {
10553 	struct mgmt_mesh_tx *mesh_tx;
10554 	struct hci_dev *hdev;
10555 
10556 	read_lock(&hci_dev_list_lock);
10557 
10558 	list_for_each_entry(hdev, &hci_dev_list, list) {
10559 		do {
10560 			mesh_tx = mgmt_mesh_next(hdev, sk);
10561 
10562 			if (mesh_tx)
10563 				mesh_send_complete(hdev, mesh_tx, true);
10564 		} while (mesh_tx);
10565 	}
10566 
10567 	read_unlock(&hci_dev_list_lock);
10568 }
10569