xref: /linux/net/bluetooth/mgmt.c (revision 07fdad3a93756b872da7b53647715c48d0f4a2d0)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <linux/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "smp.h"
37 #include "mgmt_util.h"
38 #include "mgmt_config.h"
39 #include "msft.h"
40 #include "eir.h"
41 #include "aosp.h"
42 
43 #define MGMT_VERSION	1
44 #define MGMT_REVISION	23
45 
46 static const u16 mgmt_commands[] = {
47 	MGMT_OP_READ_INDEX_LIST,
48 	MGMT_OP_READ_INFO,
49 	MGMT_OP_SET_POWERED,
50 	MGMT_OP_SET_DISCOVERABLE,
51 	MGMT_OP_SET_CONNECTABLE,
52 	MGMT_OP_SET_FAST_CONNECTABLE,
53 	MGMT_OP_SET_BONDABLE,
54 	MGMT_OP_SET_LINK_SECURITY,
55 	MGMT_OP_SET_SSP,
56 	MGMT_OP_SET_HS,
57 	MGMT_OP_SET_LE,
58 	MGMT_OP_SET_DEV_CLASS,
59 	MGMT_OP_SET_LOCAL_NAME,
60 	MGMT_OP_ADD_UUID,
61 	MGMT_OP_REMOVE_UUID,
62 	MGMT_OP_LOAD_LINK_KEYS,
63 	MGMT_OP_LOAD_LONG_TERM_KEYS,
64 	MGMT_OP_DISCONNECT,
65 	MGMT_OP_GET_CONNECTIONS,
66 	MGMT_OP_PIN_CODE_REPLY,
67 	MGMT_OP_PIN_CODE_NEG_REPLY,
68 	MGMT_OP_SET_IO_CAPABILITY,
69 	MGMT_OP_PAIR_DEVICE,
70 	MGMT_OP_CANCEL_PAIR_DEVICE,
71 	MGMT_OP_UNPAIR_DEVICE,
72 	MGMT_OP_USER_CONFIRM_REPLY,
73 	MGMT_OP_USER_CONFIRM_NEG_REPLY,
74 	MGMT_OP_USER_PASSKEY_REPLY,
75 	MGMT_OP_USER_PASSKEY_NEG_REPLY,
76 	MGMT_OP_READ_LOCAL_OOB_DATA,
77 	MGMT_OP_ADD_REMOTE_OOB_DATA,
78 	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
79 	MGMT_OP_START_DISCOVERY,
80 	MGMT_OP_STOP_DISCOVERY,
81 	MGMT_OP_CONFIRM_NAME,
82 	MGMT_OP_BLOCK_DEVICE,
83 	MGMT_OP_UNBLOCK_DEVICE,
84 	MGMT_OP_SET_DEVICE_ID,
85 	MGMT_OP_SET_ADVERTISING,
86 	MGMT_OP_SET_BREDR,
87 	MGMT_OP_SET_STATIC_ADDRESS,
88 	MGMT_OP_SET_SCAN_PARAMS,
89 	MGMT_OP_SET_SECURE_CONN,
90 	MGMT_OP_SET_DEBUG_KEYS,
91 	MGMT_OP_SET_PRIVACY,
92 	MGMT_OP_LOAD_IRKS,
93 	MGMT_OP_GET_CONN_INFO,
94 	MGMT_OP_GET_CLOCK_INFO,
95 	MGMT_OP_ADD_DEVICE,
96 	MGMT_OP_REMOVE_DEVICE,
97 	MGMT_OP_LOAD_CONN_PARAM,
98 	MGMT_OP_READ_UNCONF_INDEX_LIST,
99 	MGMT_OP_READ_CONFIG_INFO,
100 	MGMT_OP_SET_EXTERNAL_CONFIG,
101 	MGMT_OP_SET_PUBLIC_ADDRESS,
102 	MGMT_OP_START_SERVICE_DISCOVERY,
103 	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
104 	MGMT_OP_READ_EXT_INDEX_LIST,
105 	MGMT_OP_READ_ADV_FEATURES,
106 	MGMT_OP_ADD_ADVERTISING,
107 	MGMT_OP_REMOVE_ADVERTISING,
108 	MGMT_OP_GET_ADV_SIZE_INFO,
109 	MGMT_OP_START_LIMITED_DISCOVERY,
110 	MGMT_OP_READ_EXT_INFO,
111 	MGMT_OP_SET_APPEARANCE,
112 	MGMT_OP_GET_PHY_CONFIGURATION,
113 	MGMT_OP_SET_PHY_CONFIGURATION,
114 	MGMT_OP_SET_BLOCKED_KEYS,
115 	MGMT_OP_SET_WIDEBAND_SPEECH,
116 	MGMT_OP_READ_CONTROLLER_CAP,
117 	MGMT_OP_READ_EXP_FEATURES_INFO,
118 	MGMT_OP_SET_EXP_FEATURE,
119 	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
120 	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
121 	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
122 	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
123 	MGMT_OP_GET_DEVICE_FLAGS,
124 	MGMT_OP_SET_DEVICE_FLAGS,
125 	MGMT_OP_READ_ADV_MONITOR_FEATURES,
126 	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
127 	MGMT_OP_REMOVE_ADV_MONITOR,
128 	MGMT_OP_ADD_EXT_ADV_PARAMS,
129 	MGMT_OP_ADD_EXT_ADV_DATA,
130 	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
131 	MGMT_OP_SET_MESH_RECEIVER,
132 	MGMT_OP_MESH_READ_FEATURES,
133 	MGMT_OP_MESH_SEND,
134 	MGMT_OP_MESH_SEND_CANCEL,
135 	MGMT_OP_HCI_CMD_SYNC,
136 };
137 
138 static const u16 mgmt_events[] = {
139 	MGMT_EV_CONTROLLER_ERROR,
140 	MGMT_EV_INDEX_ADDED,
141 	MGMT_EV_INDEX_REMOVED,
142 	MGMT_EV_NEW_SETTINGS,
143 	MGMT_EV_CLASS_OF_DEV_CHANGED,
144 	MGMT_EV_LOCAL_NAME_CHANGED,
145 	MGMT_EV_NEW_LINK_KEY,
146 	MGMT_EV_NEW_LONG_TERM_KEY,
147 	MGMT_EV_DEVICE_CONNECTED,
148 	MGMT_EV_DEVICE_DISCONNECTED,
149 	MGMT_EV_CONNECT_FAILED,
150 	MGMT_EV_PIN_CODE_REQUEST,
151 	MGMT_EV_USER_CONFIRM_REQUEST,
152 	MGMT_EV_USER_PASSKEY_REQUEST,
153 	MGMT_EV_AUTH_FAILED,
154 	MGMT_EV_DEVICE_FOUND,
155 	MGMT_EV_DISCOVERING,
156 	MGMT_EV_DEVICE_BLOCKED,
157 	MGMT_EV_DEVICE_UNBLOCKED,
158 	MGMT_EV_DEVICE_UNPAIRED,
159 	MGMT_EV_PASSKEY_NOTIFY,
160 	MGMT_EV_NEW_IRK,
161 	MGMT_EV_NEW_CSRK,
162 	MGMT_EV_DEVICE_ADDED,
163 	MGMT_EV_DEVICE_REMOVED,
164 	MGMT_EV_NEW_CONN_PARAM,
165 	MGMT_EV_UNCONF_INDEX_ADDED,
166 	MGMT_EV_UNCONF_INDEX_REMOVED,
167 	MGMT_EV_NEW_CONFIG_OPTIONS,
168 	MGMT_EV_EXT_INDEX_ADDED,
169 	MGMT_EV_EXT_INDEX_REMOVED,
170 	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
171 	MGMT_EV_ADVERTISING_ADDED,
172 	MGMT_EV_ADVERTISING_REMOVED,
173 	MGMT_EV_EXT_INFO_CHANGED,
174 	MGMT_EV_PHY_CONFIGURATION_CHANGED,
175 	MGMT_EV_EXP_FEATURE_CHANGED,
176 	MGMT_EV_DEVICE_FLAGS_CHANGED,
177 	MGMT_EV_ADV_MONITOR_ADDED,
178 	MGMT_EV_ADV_MONITOR_REMOVED,
179 	MGMT_EV_CONTROLLER_SUSPEND,
180 	MGMT_EV_CONTROLLER_RESUME,
181 	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
182 	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
183 };
184 
185 static const u16 mgmt_untrusted_commands[] = {
186 	MGMT_OP_READ_INDEX_LIST,
187 	MGMT_OP_READ_INFO,
188 	MGMT_OP_READ_UNCONF_INDEX_LIST,
189 	MGMT_OP_READ_CONFIG_INFO,
190 	MGMT_OP_READ_EXT_INDEX_LIST,
191 	MGMT_OP_READ_EXT_INFO,
192 	MGMT_OP_READ_CONTROLLER_CAP,
193 	MGMT_OP_READ_EXP_FEATURES_INFO,
194 	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
195 	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
196 };
197 
198 static const u16 mgmt_untrusted_events[] = {
199 	MGMT_EV_INDEX_ADDED,
200 	MGMT_EV_INDEX_REMOVED,
201 	MGMT_EV_NEW_SETTINGS,
202 	MGMT_EV_CLASS_OF_DEV_CHANGED,
203 	MGMT_EV_LOCAL_NAME_CHANGED,
204 	MGMT_EV_UNCONF_INDEX_ADDED,
205 	MGMT_EV_UNCONF_INDEX_REMOVED,
206 	MGMT_EV_NEW_CONFIG_OPTIONS,
207 	MGMT_EV_EXT_INDEX_ADDED,
208 	MGMT_EV_EXT_INDEX_REMOVED,
209 	MGMT_EV_EXT_INFO_CHANGED,
210 	MGMT_EV_EXP_FEATURE_CHANGED,
211 };
212 
213 #define CACHE_TIMEOUT	secs_to_jiffies(2)
214 
215 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
216 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
217 
218 /* HCI to MGMT error code conversion table */
219 static const u8 mgmt_status_table[] = {
220 	MGMT_STATUS_SUCCESS,
221 	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
222 	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
223 	MGMT_STATUS_FAILED,		/* Hardware Failure */
224 	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
225 	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
226 	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
227 	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
228 	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
229 	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
230 	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
231 	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
232 	MGMT_STATUS_BUSY,		/* Command Disallowed */
233 	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
234 	MGMT_STATUS_REJECTED,		/* Rejected Security */
235 	MGMT_STATUS_REJECTED,		/* Rejected Personal */
236 	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
237 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
238 	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
239 	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
240 	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
241 	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
242 	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
243 	MGMT_STATUS_BUSY,		/* Repeated Attempts */
244 	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
245 	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
246 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
247 	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
248 	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
249 	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
250 	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
251 	MGMT_STATUS_FAILED,		/* Unspecified Error */
252 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
253 	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
254 	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
255 	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
256 	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
257 	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
258 	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
259 	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
260 	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
261 	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
262 	MGMT_STATUS_FAILED,		/* Transaction Collision */
263 	MGMT_STATUS_FAILED,		/* Reserved for future use */
264 	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
265 	MGMT_STATUS_REJECTED,		/* QoS Rejected */
266 	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
267 	MGMT_STATUS_REJECTED,		/* Insufficient Security */
268 	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
269 	MGMT_STATUS_FAILED,		/* Reserved for future use */
270 	MGMT_STATUS_BUSY,		/* Role Switch Pending */
271 	MGMT_STATUS_FAILED,		/* Reserved for future use */
272 	MGMT_STATUS_FAILED,		/* Slot Violation */
273 	MGMT_STATUS_FAILED,		/* Role Switch Failed */
274 	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
275 	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
276 	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
277 	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
278 	MGMT_STATUS_BUSY,		/* Controller Busy */
279 	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
280 	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
281 	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
282 	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
283 	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
284 };
285 
286 static u8 mgmt_errno_status(int err)
287 {
288 	switch (err) {
289 	case 0:
290 		return MGMT_STATUS_SUCCESS;
291 	case -EPERM:
292 		return MGMT_STATUS_REJECTED;
293 	case -EINVAL:
294 		return MGMT_STATUS_INVALID_PARAMS;
295 	case -EOPNOTSUPP:
296 		return MGMT_STATUS_NOT_SUPPORTED;
297 	case -EBUSY:
298 		return MGMT_STATUS_BUSY;
299 	case -ETIMEDOUT:
300 		return MGMT_STATUS_AUTH_FAILED;
301 	case -ENOMEM:
302 		return MGMT_STATUS_NO_RESOURCES;
303 	case -EISCONN:
304 		return MGMT_STATUS_ALREADY_CONNECTED;
305 	case -ENOTCONN:
306 		return MGMT_STATUS_DISCONNECTED;
307 	}
308 
309 	return MGMT_STATUS_FAILED;
310 }
311 
312 static u8 mgmt_status(int err)
313 {
314 	if (err < 0)
315 		return mgmt_errno_status(err);
316 
317 	if (err < ARRAY_SIZE(mgmt_status_table))
318 		return mgmt_status_table[err];
319 
320 	return MGMT_STATUS_FAILED;
321 }
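
/* A minimal illustration of the mapping above (not used by this file;
 * the values follow from the table and from mgmt_errno_status()):
 *
 *	mgmt_status(0x04)    HCI "Page Timeout"  -> MGMT_STATUS_CONNECT_FAILED
 *	mgmt_status(-EBUSY)  errno path          -> MGMT_STATUS_BUSY
 *	mgmt_status(0xff)    beyond table range  -> MGMT_STATUS_FAILED
 */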
322 
323 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
324 			    u16 len, int flag)
325 {
326 	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
327 			       flag, NULL);
328 }
329 
330 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
331 			      u16 len, int flag, struct sock *skip_sk)
332 {
333 	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
334 			       flag, skip_sk);
335 }
336 
337 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
338 		      struct sock *skip_sk)
339 {
340 	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
341 			       HCI_SOCK_TRUSTED, skip_sk);
342 }
343 
344 static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
345 {
346 	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
347 				   skip_sk);
348 }
349 
350 static u8 le_addr_type(u8 mgmt_addr_type)
351 {
352 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
353 		return ADDR_LE_DEV_PUBLIC;
354 	else
355 		return ADDR_LE_DEV_RANDOM;
356 }
357 
358 void mgmt_fill_version_info(void *ver)
359 {
360 	struct mgmt_rp_read_version *rp = ver;
361 
362 	rp->version = MGMT_VERSION;
363 	rp->revision = cpu_to_le16(MGMT_REVISION);
364 }
365 
366 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
367 			u16 data_len)
368 {
369 	struct mgmt_rp_read_version rp;
370 
371 	bt_dev_dbg(hdev, "sock %p", sk);
372 
373 	mgmt_fill_version_info(&rp);
374 
375 	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
376 				 &rp, sizeof(rp));
377 }
378 
379 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
380 			 u16 data_len)
381 {
382 	struct mgmt_rp_read_commands *rp;
383 	u16 num_commands, num_events;
384 	size_t rp_size;
385 	int i, err;
386 
387 	bt_dev_dbg(hdev, "sock %p", sk);
388 
389 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
390 		num_commands = ARRAY_SIZE(mgmt_commands);
391 		num_events = ARRAY_SIZE(mgmt_events);
392 	} else {
393 		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
394 		num_events = ARRAY_SIZE(mgmt_untrusted_events);
395 	}
396 
397 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
398 
399 	rp = kmalloc(rp_size, GFP_KERNEL);
400 	if (!rp)
401 		return -ENOMEM;
402 
403 	rp->num_commands = cpu_to_le16(num_commands);
404 	rp->num_events = cpu_to_le16(num_events);
405 
406 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
407 		__le16 *opcode = rp->opcodes;
408 
409 		for (i = 0; i < num_commands; i++, opcode++)
410 			put_unaligned_le16(mgmt_commands[i], opcode);
411 
412 		for (i = 0; i < num_events; i++, opcode++)
413 			put_unaligned_le16(mgmt_events[i], opcode);
414 	} else {
415 		__le16 *opcode = rp->opcodes;
416 
417 		for (i = 0; i < num_commands; i++, opcode++)
418 			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
419 
420 		for (i = 0; i < num_events; i++, opcode++)
421 			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
422 	}
423 
424 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
425 				rp, rp_size);
426 	kfree(rp);
427 
428 	return err;
429 }
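
/* Shape of the reply built above, assuming the struct
 * mgmt_rp_read_commands layout from mgmt.h (a packed header followed
 * by a flexible __le16 opcode array):
 *
 *	+--------------+------------+------------------------------------+
 *	| num_commands | num_events | opcodes[num_commands + num_events] |
 *	+--------------+------------+------------------------------------+
 *
 * put_unaligned_le16() is used for the array because the packed reply
 * gives no natural alignment guarantee for the opcode entries.
 */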
430 
431 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
432 			   u16 data_len)
433 {
434 	struct mgmt_rp_read_index_list *rp;
435 	struct hci_dev *d;
436 	size_t rp_len;
437 	u16 count;
438 	int err;
439 
440 	bt_dev_dbg(hdev, "sock %p", sk);
441 
442 	read_lock(&hci_dev_list_lock);
443 
444 	count = 0;
445 	list_for_each_entry(d, &hci_dev_list, list) {
446 		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
447 			count++;
448 	}
449 
450 	rp_len = sizeof(*rp) + (2 * count);
451 	rp = kmalloc(rp_len, GFP_ATOMIC);
452 	if (!rp) {
453 		read_unlock(&hci_dev_list_lock);
454 		return -ENOMEM;
455 	}
456 
457 	count = 0;
458 	list_for_each_entry(d, &hci_dev_list, list) {
459 		if (hci_dev_test_flag(d, HCI_SETUP) ||
460 		    hci_dev_test_flag(d, HCI_CONFIG) ||
461 		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
462 			continue;
463 
464 		/* Devices marked as raw-only are neither configured
465 		 * nor unconfigured controllers.
466 		 */
467 		if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
468 			continue;
469 
470 		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
471 			rp->index[count++] = cpu_to_le16(d->id);
472 			bt_dev_dbg(hdev, "Added hci%u", d->id);
473 		}
474 	}
475 
476 	rp->num_controllers = cpu_to_le16(count);
477 	rp_len = sizeof(*rp) + (2 * count);
478 
479 	read_unlock(&hci_dev_list_lock);
480 
481 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
482 				0, rp, rp_len);
483 
484 	kfree(rp);
485 
486 	return err;
487 }
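
/* Note on the two-pass list walk above: the first pass only sizes the
 * reply, the allocation uses GFP_ATOMIC because hci_dev_list_lock is
 * held and sleeping is not allowed, and the second pass re-counts
 * because controllers in HCI_SETUP/HCI_CONFIG/HCI_USER_CHANNEL state
 * (or raw-only ones) are skipped, so the final rp_len may be smaller
 * than the first estimate. read_unconf_index_list() below follows the
 * same scheme.
 */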
488 
489 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
490 				  void *data, u16 data_len)
491 {
492 	struct mgmt_rp_read_unconf_index_list *rp;
493 	struct hci_dev *d;
494 	size_t rp_len;
495 	u16 count;
496 	int err;
497 
498 	bt_dev_dbg(hdev, "sock %p", sk);
499 
500 	read_lock(&hci_dev_list_lock);
501 
502 	count = 0;
503 	list_for_each_entry(d, &hci_dev_list, list) {
504 		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
505 			count++;
506 	}
507 
508 	rp_len = sizeof(*rp) + (2 * count);
509 	rp = kmalloc(rp_len, GFP_ATOMIC);
510 	if (!rp) {
511 		read_unlock(&hci_dev_list_lock);
512 		return -ENOMEM;
513 	}
514 
515 	count = 0;
516 	list_for_each_entry(d, &hci_dev_list, list) {
517 		if (hci_dev_test_flag(d, HCI_SETUP) ||
518 		    hci_dev_test_flag(d, HCI_CONFIG) ||
519 		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
520 			continue;
521 
522 		/* Devices marked as raw-only are neither configured
523 		 * nor unconfigured controllers.
524 		 */
525 		if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
526 			continue;
527 
528 		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
529 			rp->index[count++] = cpu_to_le16(d->id);
530 			bt_dev_dbg(hdev, "Added hci%u", d->id);
531 		}
532 	}
533 
534 	rp->num_controllers = cpu_to_le16(count);
535 	rp_len = sizeof(*rp) + (2 * count);
536 
537 	read_unlock(&hci_dev_list_lock);
538 
539 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
540 				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
541 
542 	kfree(rp);
543 
544 	return err;
545 }
546 
547 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
548 			       void *data, u16 data_len)
549 {
550 	struct mgmt_rp_read_ext_index_list *rp;
551 	struct hci_dev *d;
552 	u16 count;
553 	int err;
554 
555 	bt_dev_dbg(hdev, "sock %p", sk);
556 
557 	read_lock(&hci_dev_list_lock);
558 
559 	count = 0;
560 	list_for_each_entry(d, &hci_dev_list, list)
561 		count++;
562 
563 	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
564 	if (!rp) {
565 		read_unlock(&hci_dev_list_lock);
566 		return -ENOMEM;
567 	}
568 
569 	count = 0;
570 	list_for_each_entry(d, &hci_dev_list, list) {
571 		if (hci_dev_test_flag(d, HCI_SETUP) ||
572 		    hci_dev_test_flag(d, HCI_CONFIG) ||
573 		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
574 			continue;
575 
576 		/* Devices marked as raw-only are neither configured
577 		 * nor unconfigured controllers.
578 		 */
579 		if (hci_test_quirk(d, HCI_QUIRK_RAW_DEVICE))
580 			continue;
581 
582 		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
583 			rp->entry[count].type = 0x01;
584 		else
585 			rp->entry[count].type = 0x00;
586 
587 		rp->entry[count].bus = d->bus;
588 		rp->entry[count++].index = cpu_to_le16(d->id);
589 		bt_dev_dbg(hdev, "Added hci%u", d->id);
590 	}
591 
592 	rp->num_controllers = cpu_to_le16(count);
593 
594 	read_unlock(&hci_dev_list_lock);
595 
596 	/* If this command is called at least once, then all the
597 	 * default index and unconfigured index events are disabled
598 	 * and from now on only extended index events are used.
599 	 */
600 	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
601 	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
602 	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
603 
604 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
605 				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
606 				struct_size(rp, entry, count));
607 
608 	kfree(rp);
609 
610 	return err;
611 }
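
/* struct_size(rp, entry, count) above is the overflow-checked
 * equivalent of the manual sizing used by the older index-list
 * handlers, roughly:
 *
 *	sizeof(*rp) + count * sizeof(rp->entry[0])
 *
 * except that an overflowing calculation saturates, making the
 * allocation fail instead of being silently undersized.
 */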
612 
613 static bool is_configured(struct hci_dev *hdev)
614 {
615 	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) &&
616 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
617 		return false;
618 
619 	if ((hci_test_quirk(hdev, HCI_QUIRK_INVALID_BDADDR) ||
620 	     hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY)) &&
621 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
622 		return false;
623 
624 	return true;
625 }
626 
627 static __le32 get_missing_options(struct hci_dev *hdev)
628 {
629 	u32 options = 0;
630 
631 	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) &&
632 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
633 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
634 
635 	if ((hci_test_quirk(hdev, HCI_QUIRK_INVALID_BDADDR) ||
636 	     hci_test_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY)) &&
637 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
638 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
639 
640 	return cpu_to_le32(options);
641 }
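
/* Example (hypothetical controller): a device with
 * HCI_QUIRK_INVALID_BDADDR set and public_addr still equal to
 * BDADDR_ANY reports MGMT_OPTION_PUBLIC_ADDRESS as missing, and
 * is_configured() above keeps it unconfigured until user space
 * supplies a valid address.
 */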
642 
643 static int new_options(struct hci_dev *hdev, struct sock *skip)
644 {
645 	__le32 options = get_missing_options(hdev);
646 
647 	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
648 				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
649 }
650 
651 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
652 {
653 	__le32 options = get_missing_options(hdev);
654 
655 	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
656 				 sizeof(options));
657 }
658 
659 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
660 			    void *data, u16 data_len)
661 {
662 	struct mgmt_rp_read_config_info rp;
663 	u32 options = 0;
664 
665 	bt_dev_dbg(hdev, "sock %p", sk);
666 
667 	hci_dev_lock(hdev);
668 
669 	memset(&rp, 0, sizeof(rp));
670 	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
671 
672 	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG))
673 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
674 
675 	if (hdev->set_bdaddr)
676 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
677 
678 	rp.supported_options = cpu_to_le32(options);
679 	rp.missing_options = get_missing_options(hdev);
680 
681 	hci_dev_unlock(hdev);
682 
683 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
684 				 &rp, sizeof(rp));
685 }
686 
687 static u32 get_supported_phys(struct hci_dev *hdev)
688 {
689 	u32 supported_phys = 0;
690 
691 	if (lmp_bredr_capable(hdev)) {
692 		supported_phys |= MGMT_PHY_BR_1M_1SLOT;
693 
694 		if (hdev->features[0][0] & LMP_3SLOT)
695 			supported_phys |= MGMT_PHY_BR_1M_3SLOT;
696 
697 		if (hdev->features[0][0] & LMP_5SLOT)
698 			supported_phys |= MGMT_PHY_BR_1M_5SLOT;
699 
700 		if (lmp_edr_2m_capable(hdev)) {
701 			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
702 
703 			if (lmp_edr_3slot_capable(hdev))
704 				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
705 
706 			if (lmp_edr_5slot_capable(hdev))
707 				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
708 
709 			if (lmp_edr_3m_capable(hdev)) {
710 				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
711 
712 				if (lmp_edr_3slot_capable(hdev))
713 					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
714 
715 				if (lmp_edr_5slot_capable(hdev))
716 					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
717 			}
718 		}
719 	}
720 
721 	if (lmp_le_capable(hdev)) {
722 		supported_phys |= MGMT_PHY_LE_1M_TX;
723 		supported_phys |= MGMT_PHY_LE_1M_RX;
724 
725 		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
726 			supported_phys |= MGMT_PHY_LE_2M_TX;
727 			supported_phys |= MGMT_PHY_LE_2M_RX;
728 		}
729 
730 		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
731 			supported_phys |= MGMT_PHY_LE_CODED_TX;
732 			supported_phys |= MGMT_PHY_LE_CODED_RX;
733 		}
734 	}
735 
736 	return supported_phys;
737 }
738 
739 static u32 get_selected_phys(struct hci_dev *hdev)
740 {
741 	u32 selected_phys = 0;
742 
743 	if (lmp_bredr_capable(hdev)) {
744 		selected_phys |= MGMT_PHY_BR_1M_1SLOT;
745 
746 		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
747 			selected_phys |= MGMT_PHY_BR_1M_3SLOT;
748 
749 		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
750 			selected_phys |= MGMT_PHY_BR_1M_5SLOT;
751 
752 		if (lmp_edr_2m_capable(hdev)) {
753 			if (!(hdev->pkt_type & HCI_2DH1))
754 				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
755 
756 			if (lmp_edr_3slot_capable(hdev) &&
757 			    !(hdev->pkt_type & HCI_2DH3))
758 				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
759 
760 			if (lmp_edr_5slot_capable(hdev) &&
761 			    !(hdev->pkt_type & HCI_2DH5))
762 				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
763 
764 			if (lmp_edr_3m_capable(hdev)) {
765 				if (!(hdev->pkt_type & HCI_3DH1))
766 					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
767 
768 				if (lmp_edr_3slot_capable(hdev) &&
769 				    !(hdev->pkt_type & HCI_3DH3))
770 					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
771 
772 				if (lmp_edr_5slot_capable(hdev) &&
773 				    !(hdev->pkt_type & HCI_3DH5))
774 					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
775 			}
776 		}
777 	}
778 
779 	if (lmp_le_capable(hdev)) {
780 		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
781 			selected_phys |= MGMT_PHY_LE_1M_TX;
782 
783 		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
784 			selected_phys |= MGMT_PHY_LE_1M_RX;
785 
786 		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
787 			selected_phys |= MGMT_PHY_LE_2M_TX;
788 
789 		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
790 			selected_phys |= MGMT_PHY_LE_2M_RX;
791 
792 		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
793 			selected_phys |= MGMT_PHY_LE_CODED_TX;
794 
795 		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
796 			selected_phys |= MGMT_PHY_LE_CODED_RX;
797 	}
798 
799 	return selected_phys;
800 }
801 
802 static u32 get_configurable_phys(struct hci_dev *hdev)
803 {
804 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
805 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
806 }
807 
808 static u32 get_supported_settings(struct hci_dev *hdev)
809 {
810 	u32 settings = 0;
811 
812 	settings |= MGMT_SETTING_POWERED;
813 	settings |= MGMT_SETTING_BONDABLE;
814 	settings |= MGMT_SETTING_DEBUG_KEYS;
815 	settings |= MGMT_SETTING_CONNECTABLE;
816 	settings |= MGMT_SETTING_DISCOVERABLE;
817 
818 	if (lmp_bredr_capable(hdev)) {
819 		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
820 			settings |= MGMT_SETTING_FAST_CONNECTABLE;
821 		settings |= MGMT_SETTING_BREDR;
822 		settings |= MGMT_SETTING_LINK_SECURITY;
823 
824 		if (lmp_ssp_capable(hdev))
825 			settings |= MGMT_SETTING_SSP;
827 
828 		if (lmp_sc_capable(hdev))
829 			settings |= MGMT_SETTING_SECURE_CONN;
830 
831 		if (hci_test_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED))
832 			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
833 	}
834 
835 	if (lmp_le_capable(hdev)) {
836 		settings |= MGMT_SETTING_LE;
837 		settings |= MGMT_SETTING_SECURE_CONN;
838 		settings |= MGMT_SETTING_PRIVACY;
839 		settings |= MGMT_SETTING_STATIC_ADDRESS;
840 		settings |= MGMT_SETTING_ADVERTISING;
841 	}
842 
843 	if (hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG) || hdev->set_bdaddr)
844 		settings |= MGMT_SETTING_CONFIGURATION;
845 
846 	if (cis_central_capable(hdev))
847 		settings |= MGMT_SETTING_CIS_CENTRAL;
848 
849 	if (cis_peripheral_capable(hdev))
850 		settings |= MGMT_SETTING_CIS_PERIPHERAL;
851 
852 	if (ll_privacy_capable(hdev))
853 		settings |= MGMT_SETTING_LL_PRIVACY;
854 
855 	settings |= MGMT_SETTING_PHY_CONFIGURATION;
856 
857 	return settings;
858 }
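
/* Worked example (assumed dual-mode, SSP-capable controller): the mask
 * built above would contain at least MGMT_SETTING_POWERED |
 * MGMT_SETTING_BONDABLE | MGMT_SETTING_CONNECTABLE |
 * MGMT_SETTING_DISCOVERABLE | MGMT_SETTING_DEBUG_KEYS |
 * MGMT_SETTING_BREDR | MGMT_SETTING_LINK_SECURITY | MGMT_SETTING_SSP |
 * MGMT_SETTING_LE | MGMT_SETTING_SECURE_CONN, with the CIS and
 * LL-privacy bits depending on the controller's reported features.
 */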
859 
860 static u32 get_current_settings(struct hci_dev *hdev)
861 {
862 	u32 settings = 0;
863 
864 	if (hdev_is_powered(hdev))
865 		settings |= MGMT_SETTING_POWERED;
866 
867 	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
868 		settings |= MGMT_SETTING_CONNECTABLE;
869 
870 	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
871 		settings |= MGMT_SETTING_FAST_CONNECTABLE;
872 
873 	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
874 		settings |= MGMT_SETTING_DISCOVERABLE;
875 
876 	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
877 		settings |= MGMT_SETTING_BONDABLE;
878 
879 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
880 		settings |= MGMT_SETTING_BREDR;
881 
882 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
883 		settings |= MGMT_SETTING_LE;
884 
885 	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
886 		settings |= MGMT_SETTING_LINK_SECURITY;
887 
888 	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
889 		settings |= MGMT_SETTING_SSP;
890 
891 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
892 		settings |= MGMT_SETTING_ADVERTISING;
893 
894 	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
895 		settings |= MGMT_SETTING_SECURE_CONN;
896 
897 	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
898 		settings |= MGMT_SETTING_DEBUG_KEYS;
899 
900 	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
901 		settings |= MGMT_SETTING_PRIVACY;
902 
903 	/* The current setting for static address has two purposes. The
904 	 * first is to indicate if the static address will be used and
905 	 * the second is to indicate if it is actually set.
906 	 *
907 	 * This means if the static address is not configured, this flag
908 	 * will never be set. If the address is configured, then whether
909 	 * the address is actually in use decides if the flag is set or not.
910 	 *
911 	 * For single-mode LE-only controllers and dual-mode controllers
912 	 * with BR/EDR disabled, the existence of the static address will
913 	 * be evaluated.
914 	 */
915 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
916 	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
917 	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
918 		if (bacmp(&hdev->static_addr, BDADDR_ANY))
919 			settings |= MGMT_SETTING_STATIC_ADDRESS;
920 	}
921 
922 	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
923 		settings |= MGMT_SETTING_WIDEBAND_SPEECH;
924 
925 	if (cis_central_enabled(hdev))
926 		settings |= MGMT_SETTING_CIS_CENTRAL;
927 
928 	if (cis_peripheral_enabled(hdev))
929 		settings |= MGMT_SETTING_CIS_PERIPHERAL;
930 
931 	if (bis_enabled(hdev))
932 		settings |= MGMT_SETTING_ISO_BROADCASTER;
933 
934 	if (sync_recv_enabled(hdev))
935 		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;
936 
937 	if (ll_privacy_enabled(hdev))
938 		settings |= MGMT_SETTING_LL_PRIVACY;
939 
940 	return settings;
941 }
942 
943 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
944 {
945 	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
946 }
947 
948 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
949 {
950 	struct mgmt_pending_cmd *cmd;
951 
952 	/* If there's a pending mgmt command the flags will not yet have
953 	 * their final values, so check for this first.
954 	 */
955 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
956 	if (cmd) {
957 		struct mgmt_mode *cp = cmd->param;

958 		if (cp->val == 0x01)
959 			return LE_AD_GENERAL;
960 		else if (cp->val == 0x02)
961 			return LE_AD_LIMITED;
962 	} else {
963 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
964 			return LE_AD_LIMITED;
965 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
966 			return LE_AD_GENERAL;
967 	}
968 
969 	return 0;
970 }
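
/* The value returned here feeds the Flags AD type when advertising
 * data is built (e.g. in eir.c): LE_AD_LIMITED advertises limited
 * discoverable mode, LE_AD_GENERAL general discoverable mode, and 0
 * means not discoverable.
 */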
971 
972 bool mgmt_get_connectable(struct hci_dev *hdev)
973 {
974 	struct mgmt_pending_cmd *cmd;
975 
976 	/* If there's a pending mgmt command the flag will not yet have
977 	 * its final value, so check for this first.
978 	 */
979 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
980 	if (cmd) {
981 		struct mgmt_mode *cp = cmd->param;
982 
983 		return cp->val;
984 	}
985 
986 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
987 }
988 
989 static int service_cache_sync(struct hci_dev *hdev, void *data)
990 {
991 	hci_update_eir_sync(hdev);
992 	hci_update_class_sync(hdev);
993 
994 	return 0;
995 }
996 
997 static void service_cache_off(struct work_struct *work)
998 {
999 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1000 					    service_cache.work);
1001 
1002 	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1003 		return;
1004 
1005 	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
1006 }
1007 
1008 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1009 {
1010 	/* The generation of a new RPA and programming it into the
1011 	 * controller happens in the hci_start_ext_adv_sync() and
1012 	 * hci_enable_advertising_sync() functions called below.
1013 	 */
1014 	if (ext_adv_capable(hdev))
1015 		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1016 	else
1017 		return hci_enable_advertising_sync(hdev);
1018 }
1019 
1020 static void rpa_expired(struct work_struct *work)
1021 {
1022 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1023 					    rpa_expired.work);
1024 
1025 	bt_dev_dbg(hdev, "");
1026 
1027 	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1028 
1029 	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1030 		return;
1031 
1032 	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
1033 }
1034 
1035 static int set_discoverable_sync(struct hci_dev *hdev, void *data);
1036 
1037 static void discov_off(struct work_struct *work)
1038 {
1039 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1040 					    discov_off.work);
1041 
1042 	bt_dev_dbg(hdev, "");
1043 
1044 	hci_dev_lock(hdev);
1045 
1046 	/* When the discoverable timeout triggers, just make sure
1047 	 * the limited discoverable flag is cleared. Even in the case
1048 	 * of a timeout triggered from general discoverable, it is
1049 	 * safe to unconditionally clear the flag.
1050 	 */
1051 	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1052 	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1053 	hdev->discov_timeout = 0;
1054 
1055 	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);
1056 
1057 	mgmt_new_settings(hdev);
1058 
1059 	hci_dev_unlock(hdev);
1060 }
1061 
1062 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1063 
1064 static void mesh_send_complete(struct hci_dev *hdev,
1065 			       struct mgmt_mesh_tx *mesh_tx, bool silent)
1066 {
1067 	u8 handle = mesh_tx->handle;
1068 
1069 	if (!silent)
1070 		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
1071 			   sizeof(handle), NULL);
1072 
1073 	mgmt_mesh_remove(mesh_tx);
1074 }
1075 
1076 static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
1077 {
1078 	struct mgmt_mesh_tx *mesh_tx;
1079 
1080 	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
1081 	if (list_empty(&hdev->adv_instances))
1082 		hci_disable_advertising_sync(hdev);
1083 	mesh_tx = mgmt_mesh_next(hdev, NULL);
1084 
1085 	if (mesh_tx)
1086 		mesh_send_complete(hdev, mesh_tx, false);
1087 
1088 	return 0;
1089 }
1090 
1091 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1092 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
1093 static void mesh_next(struct hci_dev *hdev, void *data, int err)
1094 {
1095 	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);
1096 
1097 	if (!mesh_tx)
1098 		return;
1099 
1100 	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
1101 				 mesh_send_start_complete);
1102 
1103 	if (err < 0)
1104 		mesh_send_complete(hdev, mesh_tx, false);
1105 	else
1106 		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
1107 }
1108 
1109 static void mesh_send_done(struct work_struct *work)
1110 {
1111 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1112 					    mesh_send_done.work);
1113 
1114 	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
1115 		return;
1116 
1117 	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
1118 }
1119 
1120 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1121 {
1122 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1123 		return;
1124 
1125 	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);
1126 
1127 	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
1128 	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1129 	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1130 	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);
1131 
1132 	/* Non-mgmt controlled devices get this bit set
1133 	 * implicitly so that pairing works for them. However,
1134 	 * for mgmt we require user-space to explicitly enable
1135 	 * it.
1136 	 */
1137 	hci_dev_clear_flag(hdev, HCI_BONDABLE);
1138 
1139 	hci_dev_set_flag(hdev, HCI_MGMT);
1140 }
1141 
1142 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1143 				void *data, u16 data_len)
1144 {
1145 	struct mgmt_rp_read_info rp;
1146 
1147 	bt_dev_dbg(hdev, "sock %p", sk);
1148 
1149 	hci_dev_lock(hdev);
1150 
1151 	memset(&rp, 0, sizeof(rp));
1152 
1153 	bacpy(&rp.bdaddr, &hdev->bdaddr);
1154 
1155 	rp.version = hdev->hci_ver;
1156 	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1157 
1158 	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1159 	rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1160 
1161 	memcpy(rp.dev_class, hdev->dev_class, 3);
1162 
1163 	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1164 	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1165 
1166 	hci_dev_unlock(hdev);
1167 
1168 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1169 				 sizeof(rp));
1170 }
1171 
1172 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1173 {
1174 	u16 eir_len = 0;
1175 	size_t name_len;
1176 
1177 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1178 		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1179 					  hdev->dev_class, 3);
1180 
1181 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1182 		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1183 					  hdev->appearance);
1184 
1185 	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
1186 	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1187 				  hdev->dev_name, name_len);
1188 
1189 	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
1190 	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1191 				  hdev->short_name, name_len);
1192 
1193 	return eir_len;
1194 }
1195 
1196 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1197 				    void *data, u16 data_len)
1198 {
1199 	char buf[512];
1200 	struct mgmt_rp_read_ext_info *rp = (void *)buf;
1201 	u16 eir_len;
1202 
1203 	bt_dev_dbg(hdev, "sock %p", sk);
1204 
1205 	memset(&buf, 0, sizeof(buf));
1206 
1207 	hci_dev_lock(hdev);
1208 
1209 	bacpy(&rp->bdaddr, &hdev->bdaddr);
1210 
1211 	rp->version = hdev->hci_ver;
1212 	rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1213 
1214 	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1215 	rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1216 
1218 	eir_len = append_eir_data_to_buf(hdev, rp->eir);
1219 	rp->eir_len = cpu_to_le16(eir_len);
1220 
1221 	hci_dev_unlock(hdev);
1222 
1223 	/* If this command is called at least once, then the events
1224 	 * for class of device and local name changes are disabled
1225 	 * and only the new extended controller information event
1226 	 * is used.
1227 	 */
1228 	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1229 	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1230 	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1231 
1232 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1233 				 sizeof(*rp) + eir_len);
1234 }
1235 
1236 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1237 {
1238 	char buf[512];
1239 	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1240 	u16 eir_len;
1241 
1242 	memset(buf, 0, sizeof(buf));
1243 
1244 	eir_len = append_eir_data_to_buf(hdev, ev->eir);
1245 	ev->eir_len = cpu_to_le16(eir_len);
1246 
1247 	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1248 				  sizeof(*ev) + eir_len,
1249 				  HCI_MGMT_EXT_INFO_EVENTS, skip);
1250 }
1251 
1252 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1253 {
1254 	__le32 settings = cpu_to_le32(get_current_settings(hdev));
1255 
1256 	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1257 				 sizeof(settings));
1258 }
1259 
1260 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1261 {
1262 	struct mgmt_ev_advertising_added ev;
1263 
1264 	ev.instance = instance;
1265 
1266 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1267 }
1268 
1269 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1270 			      u8 instance)
1271 {
1272 	struct mgmt_ev_advertising_removed ev;
1273 
1274 	ev.instance = instance;
1275 
1276 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1277 }
1278 
1279 static void cancel_adv_timeout(struct hci_dev *hdev)
1280 {
1281 	if (hdev->adv_instance_timeout) {
1282 		hdev->adv_instance_timeout = 0;
1283 		cancel_delayed_work(&hdev->adv_instance_expire);
1284 	}
1285 }
1286 
1287 /* This function requires the caller holds hdev->lock */
1288 static void restart_le_actions(struct hci_dev *hdev)
1289 {
1290 	struct hci_conn_params *p;
1291 
1292 	list_for_each_entry(p, &hdev->le_conn_params, list) {
1293 		/* Needed for the AUTO_OFF case where the controller might
1294 		 * not "really" have been powered off.
1295 		 */
1296 		hci_pend_le_list_del_init(p);
1297 
1298 		switch (p->auto_connect) {
1299 		case HCI_AUTO_CONN_DIRECT:
1300 		case HCI_AUTO_CONN_ALWAYS:
1301 			hci_pend_le_list_add(p, &hdev->pend_le_conns);
1302 			break;
1303 		case HCI_AUTO_CONN_REPORT:
1304 			hci_pend_le_list_add(p, &hdev->pend_le_reports);
1305 			break;
1306 		default:
1307 			break;
1308 		}
1309 	}
1310 }
1311 
1312 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1313 {
1314 	__le32 ev = cpu_to_le32(get_current_settings(hdev));
1315 
1316 	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1317 				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1318 }
1319 
1320 static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
1321 {
1322 	struct mgmt_pending_cmd *cmd = data;
1323 	struct mgmt_mode *cp;
1324 
1325 	/* Make sure cmd still outstanding. */
1326 	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
1327 		return;
1328 
1329 	cp = cmd->param;
1330 
1331 	bt_dev_dbg(hdev, "err %d", err);
1332 
1333 	if (!err) {
1334 		if (cp->val) {
1335 			hci_dev_lock(hdev);
1336 			restart_le_actions(hdev);
1337 			hci_update_passive_scan(hdev);
1338 			hci_dev_unlock(hdev);
1339 		}
1340 
1341 		send_settings_rsp(cmd->sk, cmd->opcode, hdev);
1342 
1343 		/* Only call new_settings for power on as power off is deferred
1344 		 * to hdev->power_off work which does call hci_dev_do_close.
1345 		 */
1346 		if (cp->val)
1347 			new_settings(hdev, cmd->sk);
1348 	} else {
1349 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
1350 				mgmt_status(err));
1351 	}
1352 
1353 	mgmt_pending_free(cmd);
1354 }
1355 
1356 static int set_powered_sync(struct hci_dev *hdev, void *data)
1357 {
1358 	struct mgmt_pending_cmd *cmd = data;
1359 	struct mgmt_mode cp;
1360 
1361 	mutex_lock(&hdev->mgmt_pending_lock);
1362 
1363 	/* Make sure cmd still outstanding. */
1364 	if (!__mgmt_pending_listed(hdev, cmd)) {
1365 		mutex_unlock(&hdev->mgmt_pending_lock);
1366 		return -ECANCELED;
1367 	}
1368 
1369 	memcpy(&cp, cmd->param, sizeof(cp));
1370 
1371 	mutex_unlock(&hdev->mgmt_pending_lock);
1372 
1373 	BT_DBG("%s", hdev->name);
1374 
1375 	return hci_set_powered_sync(hdev, cp.val);
1376 }
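
/* The lock/copy/unlock sequence above is the common shape for these
 * cmd_sync callbacks: cmd can be freed by a racing
 * mgmt_pending_remove(), so its parameters are copied to the stack
 * under mgmt_pending_lock and the lock is dropped before issuing HCI
 * traffic, which may sleep. set_ssp_sync() later in this file follows
 * the same scheme.
 */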
1377 
1378 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1379 		       u16 len)
1380 {
1381 	struct mgmt_mode *cp = data;
1382 	struct mgmt_pending_cmd *cmd;
1383 	int err;
1384 
1385 	bt_dev_dbg(hdev, "sock %p", sk);
1386 
1387 	if (cp->val != 0x00 && cp->val != 0x01)
1388 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1389 				       MGMT_STATUS_INVALID_PARAMS);
1390 
1391 	hci_dev_lock(hdev);
1392 
1393 	if (!cp->val) {
1394 		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) {
1395 			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1396 					      MGMT_STATUS_BUSY);
1397 			goto failed;
1398 		}
1399 	}
1400 
1401 	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1402 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1403 				      MGMT_STATUS_BUSY);
1404 		goto failed;
1405 	}
1406 
1407 	if (!!cp->val == hdev_is_powered(hdev)) {
1408 		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1409 		goto failed;
1410 	}
1411 
1412 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1413 	if (!cmd) {
1414 		err = -ENOMEM;
1415 		goto failed;
1416 	}
1417 
1418 	/* Cancel potentially blocking sync operation before power off */
1419 	if (cp->val == 0x00) {
1420 		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
1421 		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
1422 					 mgmt_set_powered_complete);
1423 	} else {
1424 		/* Use hci_cmd_sync_submit since hdev might not be running */
1425 		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
1426 					  mgmt_set_powered_complete);
1427 	}
1428 
1429 	if (err < 0)
1430 		mgmt_pending_remove(cmd);
1431 
1432 failed:
1433 	hci_dev_unlock(hdev);
1434 	return err;
1435 }
1436 
1437 int mgmt_new_settings(struct hci_dev *hdev)
1438 {
1439 	return new_settings(hdev, NULL);
1440 }
1441 
1442 struct cmd_lookup {
1443 	struct sock *sk;
1444 	struct hci_dev *hdev;
1445 	u8 mgmt_status;
1446 };
1447 
1448 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1449 {
1450 	struct cmd_lookup *match = data;
1451 
1452 	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1453 
1454 	if (match->sk == NULL) {
1455 		match->sk = cmd->sk;
1456 		sock_hold(match->sk);
1457 	}
1458 }
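
/* settings_rsp() is meant as a callback when iterating pending
 * commands (e.g. via mgmt_pending_foreach()): the first command's
 * socket is stashed in the cmd_lookup and pinned with sock_hold() so
 * the caller can emit new_settings() while skipping that socket, then
 * drop the reference with sock_put(); see set_ssp_complete() below for
 * the full pattern.
 */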
1459 
1460 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1461 {
1462 	u8 *status = data;
1463 
1464 	mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, *status);
1465 }
1466 
1467 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1468 {
1469 	struct cmd_lookup *match = data;
1470 
1471 	/* Dequeue any cmd_sync entries that use cmd as their data, since
1472 	 * cmd is about to be removed/freed.
1473 	 */
1474 	hci_cmd_sync_dequeue(match->hdev, NULL, cmd, NULL);
1475 
1476 	if (cmd->cmd_complete) {
1477 		cmd->cmd_complete(cmd, match->mgmt_status);
1478 		return;
1479 	}
1480 
1481 	cmd_status_rsp(cmd, data);
1482 }
1483 
1484 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1485 {
1486 	return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status,
1487 				 cmd->param, cmd->param_len);
1488 }
1489 
1490 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1491 {
1492 	return mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status,
1493 				 cmd->param, sizeof(struct mgmt_addr_info));
1494 }
1495 
1496 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1497 {
1498 	if (!lmp_bredr_capable(hdev))
1499 		return MGMT_STATUS_NOT_SUPPORTED;
1500 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1501 		return MGMT_STATUS_REJECTED;
1502 	else
1503 		return MGMT_STATUS_SUCCESS;
1504 }
1505 
1506 static u8 mgmt_le_support(struct hci_dev *hdev)
1507 {
1508 	if (!lmp_le_capable(hdev))
1509 		return MGMT_STATUS_NOT_SUPPORTED;
1510 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1511 		return MGMT_STATUS_REJECTED;
1512 	else
1513 		return MGMT_STATUS_SUCCESS;
1514 }
1515 
1516 static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
1517 					   int err)
1518 {
1519 	struct mgmt_pending_cmd *cmd = data;
1520 
1521 	bt_dev_dbg(hdev, "err %d", err);
1522 
1523 	/* Make sure cmd still outstanding. */
1524 	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
1525 		return;
1526 
1527 	hci_dev_lock(hdev);
1528 
1529 	if (err) {
1530 		u8 mgmt_err = mgmt_status(err);

1531 		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
1532 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1533 		goto done;
1534 	}
1535 
1536 	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1537 	    hdev->discov_timeout > 0) {
1538 		int to = secs_to_jiffies(hdev->discov_timeout);
1539 		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1540 	}
1541 
1542 	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1543 	new_settings(hdev, cmd->sk);
1544 
1545 done:
1546 	mgmt_pending_free(cmd);
1547 	hci_dev_unlock(hdev);
1548 }
1549 
1550 static int set_discoverable_sync(struct hci_dev *hdev, void *data)
1551 {
1552 	if (!mgmt_pending_listed(hdev, data))
1553 		return -ECANCELED;
1554 
1555 	BT_DBG("%s", hdev->name);
1556 
1557 	return hci_update_discoverable_sync(hdev);
1558 }
1559 
1560 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1561 			    u16 len)
1562 {
1563 	struct mgmt_cp_set_discoverable *cp = data;
1564 	struct mgmt_pending_cmd *cmd;
1565 	u16 timeout;
1566 	int err;
1567 
1568 	bt_dev_dbg(hdev, "sock %p", sk);
1569 
1570 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1571 	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1572 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1573 				       MGMT_STATUS_REJECTED);
1574 
1575 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1576 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1577 				       MGMT_STATUS_INVALID_PARAMS);
1578 
1579 	timeout = __le16_to_cpu(cp->timeout);
1580 
1581 	/* Disabling discoverable requires that no timeout is set,
1582 	 * and enabling limited discoverable requires a timeout.
1583 	 */
1584 	if ((cp->val == 0x00 && timeout > 0) ||
1585 	    (cp->val == 0x02 && timeout == 0))
1586 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1587 				       MGMT_STATUS_INVALID_PARAMS);
1588 
1589 	hci_dev_lock(hdev);
1590 
1591 	if (!hdev_is_powered(hdev) && timeout > 0) {
1592 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1593 				      MGMT_STATUS_NOT_POWERED);
1594 		goto failed;
1595 	}
1596 
1597 	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1598 	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1599 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1600 				      MGMT_STATUS_BUSY);
1601 		goto failed;
1602 	}
1603 
1604 	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1605 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1606 				      MGMT_STATUS_REJECTED);
1607 		goto failed;
1608 	}
1609 
1610 	if (hdev->advertising_paused) {
1611 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1612 				      MGMT_STATUS_BUSY);
1613 		goto failed;
1614 	}
1615 
1616 	if (!hdev_is_powered(hdev)) {
1617 		bool changed = false;
1618 
1619 		/* Setting limited discoverable when powered off is
1620 		 * not a valid operation since it requires a timeout
1621 		 * and so there is no need to check HCI_LIMITED_DISCOVERABLE.
1622 		 */
1623 		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1624 			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1625 			changed = true;
1626 		}
1627 
1628 		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1629 		if (err < 0)
1630 			goto failed;
1631 
1632 		if (changed)
1633 			err = new_settings(hdev, sk);
1634 
1635 		goto failed;
1636 	}
1637 
1638 	/* If the current mode is the same, then just update the timeout
1639 	 * value with the new value. If only the timeout gets updated,
1640 	 * then no HCI transactions are needed.
1641 	 */
1642 	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1643 	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
1644 						   HCI_LIMITED_DISCOVERABLE)) {
1645 		cancel_delayed_work(&hdev->discov_off);
1646 		hdev->discov_timeout = timeout;
1647 
1648 		if (cp->val && hdev->discov_timeout > 0) {
1649 			int to = secs_to_jiffies(hdev->discov_timeout);
1650 			queue_delayed_work(hdev->req_workqueue,
1651 					   &hdev->discov_off, to);
1652 		}
1653 
1654 		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1655 		goto failed;
1656 	}
1657 
1658 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1659 	if (!cmd) {
1660 		err = -ENOMEM;
1661 		goto failed;
1662 	}
1663 
1664 	/* Cancel any potential discoverable timeout that might be
1665 	 * still active and store the new timeout value. The arming of
1666 	 * the timeout happens in the complete handler.
1667 	 */
1668 	cancel_delayed_work(&hdev->discov_off);
1669 	hdev->discov_timeout = timeout;
1670 
1671 	if (cp->val)
1672 		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1673 	else
1674 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1675 
1676 	/* Limited discoverable mode */
1677 	if (cp->val == 0x02)
1678 		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1679 	else
1680 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1681 
1682 	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1683 				 mgmt_set_discoverable_complete);
1684 
1685 	if (err < 0)
1686 		mgmt_pending_remove(cmd);
1687 
1688 failed:
1689 	hci_dev_unlock(hdev);
1690 	return err;
1691 }
1692 
1693 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1694 					  int err)
1695 {
1696 	struct mgmt_pending_cmd *cmd = data;
1697 
1698 	bt_dev_dbg(hdev, "err %d", err);
1699 
1700 	/* Make sure cmd still outstanding. */
1701 	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
1702 		return;
1703 
1704 	hci_dev_lock(hdev);
1705 
1706 	if (err) {
1707 		u8 mgmt_err = mgmt_status(err);

1708 		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
1709 		goto done;
1710 	}
1711 
1712 	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1713 	new_settings(hdev, cmd->sk);
1714 
1715 done:
1716 	mgmt_pending_free(cmd);
1717 
1718 	hci_dev_unlock(hdev);
1719 }
1720 
1721 static int set_connectable_update_settings(struct hci_dev *hdev,
1722 					   struct sock *sk, u8 val)
1723 {
1724 	bool changed = false;
1725 	int err;
1726 
1727 	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1728 		changed = true;
1729 
1730 	if (val) {
1731 		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1732 	} else {
1733 		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1734 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1735 	}
1736 
1737 	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1738 	if (err < 0)
1739 		return err;
1740 
1741 	if (changed) {
1742 		hci_update_scan(hdev);
1743 		hci_update_passive_scan(hdev);
1744 		return new_settings(hdev, sk);
1745 	}
1746 
1747 	return 0;
1748 }
1749 
1750 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1751 {
1752 	if (!mgmt_pending_listed(hdev, data))
1753 		return -ECANCELED;
1754 
1755 	BT_DBG("%s", hdev->name);
1756 
1757 	return hci_update_connectable_sync(hdev);
1758 }
1759 
1760 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1761 			   u16 len)
1762 {
1763 	struct mgmt_mode *cp = data;
1764 	struct mgmt_pending_cmd *cmd;
1765 	int err;
1766 
1767 	bt_dev_dbg(hdev, "sock %p", sk);
1768 
1769 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1770 	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1771 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1772 				       MGMT_STATUS_REJECTED);
1773 
1774 	if (cp->val != 0x00 && cp->val != 0x01)
1775 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1776 				       MGMT_STATUS_INVALID_PARAMS);
1777 
1778 	hci_dev_lock(hdev);
1779 
1780 	if (!hdev_is_powered(hdev)) {
1781 		err = set_connectable_update_settings(hdev, sk, cp->val);
1782 		goto failed;
1783 	}
1784 
1785 	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1786 	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1787 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1788 				      MGMT_STATUS_BUSY);
1789 		goto failed;
1790 	}
1791 
1792 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1793 	if (!cmd) {
1794 		err = -ENOMEM;
1795 		goto failed;
1796 	}
1797 
1798 	if (cp->val) {
1799 		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1800 	} else {
1801 		if (hdev->discov_timeout > 0)
1802 			cancel_delayed_work(&hdev->discov_off);
1803 
1804 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1805 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1806 		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1807 	}
1808 
1809 	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1810 				 mgmt_set_connectable_complete);
1811 
1812 	if (err < 0)
1813 		mgmt_pending_remove(cmd);
1814 
1815 failed:
1816 	hci_dev_unlock(hdev);
1817 	return err;
1818 }
1819 
1820 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1821 			u16 len)
1822 {
1823 	struct mgmt_mode *cp = data;
1824 	bool changed;
1825 	int err;
1826 
1827 	bt_dev_dbg(hdev, "sock %p", sk);
1828 
1829 	if (cp->val != 0x00 && cp->val != 0x01)
1830 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1831 				       MGMT_STATUS_INVALID_PARAMS);
1832 
1833 	hci_dev_lock(hdev);
1834 
1835 	if (cp->val)
1836 		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1837 	else
1838 		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1839 
1840 	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1841 	if (err < 0)
1842 		goto unlock;
1843 
1844 	if (changed) {
1845 		/* In limited privacy mode the change of bondable mode
1846 		 * may affect the local advertising address.
1847 		 */
1848 		hci_update_discoverable(hdev);
1849 
1850 		err = new_settings(hdev, sk);
1851 	}
1852 
1853 unlock:
1854 	hci_dev_unlock(hdev);
1855 	return err;
1856 }
1857 
1858 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1859 			     u16 len)
1860 {
1861 	struct mgmt_mode *cp = data;
1862 	struct mgmt_pending_cmd *cmd;
1863 	u8 val, status;
1864 	int err;
1865 
1866 	bt_dev_dbg(hdev, "sock %p", sk);
1867 
1868 	status = mgmt_bredr_support(hdev);
1869 	if (status)
1870 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1871 				       status);
1872 
1873 	if (cp->val != 0x00 && cp->val != 0x01)
1874 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1875 				       MGMT_STATUS_INVALID_PARAMS);
1876 
1877 	hci_dev_lock(hdev);
1878 
1879 	if (!hdev_is_powered(hdev)) {
1880 		bool changed = false;
1881 
1882 		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1883 			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1884 			changed = true;
1885 		}
1886 
1887 		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1888 		if (err < 0)
1889 			goto failed;
1890 
1891 		if (changed)
1892 			err = new_settings(hdev, sk);
1893 
1894 		goto failed;
1895 	}
1896 
1897 	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1898 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1899 				      MGMT_STATUS_BUSY);
1900 		goto failed;
1901 	}
1902 
1903 	val = !!cp->val;
1904 
1905 	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1906 		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1907 		goto failed;
1908 	}
1909 
1910 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1911 	if (!cmd) {
1912 		err = -ENOMEM;
1913 		goto failed;
1914 	}
1915 
1916 	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1917 	if (err < 0) {
1918 		mgmt_pending_remove(cmd);
1919 		goto failed;
1920 	}
1921 
1922 failed:
1923 	hci_dev_unlock(hdev);
1924 	return err;
1925 }
1926 
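/* Completion handler for Set SSP: on failure, roll back the
 * optimistically set HCI_SSP_ENABLED flag and report the error; on
 * success, derive the final flag state and notify listeners if it
 * changed.
 */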
1927 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1928 {
1929 	struct cmd_lookup match = { NULL, hdev };
1930 	struct mgmt_pending_cmd *cmd = data;
1931 	struct mgmt_mode *cp;
1932 	u8 enable;
1933 	bool changed;
1934 
1935 	/* Make sure cmd still outstanding. */
1936 	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
1937 		return;
1938 
1939 	cp = cmd->param;
1940 	enable = cp->val;
1941 
1942 	if (err) {
1943 		u8 mgmt_err = mgmt_status(err);
1944 
1945 		if (enable && hci_dev_test_and_clear_flag(hdev,
1946 							  HCI_SSP_ENABLED)) {
1947 			new_settings(hdev, NULL);
1948 		}
1949 
1950 		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
1951 		return;
1952 	}
1953 
1954 	if (enable) {
1955 		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1956 	} else {
1957 		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1958 	}
1959 
1960 	settings_rsp(cmd, &match);
1961 
1962 	if (changed)
1963 		new_settings(hdev, match.sk);
1964 
1965 	if (match.sk)
1966 		sock_put(match.sk);
1967 
1968 	hci_update_eir_sync(hdev);
1969 }
1970 
1971 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1972 {
1973 	struct mgmt_pending_cmd *cmd = data;
1974 	struct mgmt_mode cp;
1975 	bool changed = false;
1976 	int err;
1977 
1978 	mutex_lock(&hdev->mgmt_pending_lock);
1979 
1980 	if (!__mgmt_pending_listed(hdev, cmd)) {
1981 		mutex_unlock(&hdev->mgmt_pending_lock);
1982 		return -ECANCELED;
1983 	}
1984 
1985 	memcpy(&cp, cmd->param, sizeof(cp));
1986 
1987 	mutex_unlock(&hdev->mgmt_pending_lock);
1988 
1989 	if (cp.val)
1990 		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1991 
1992 	err = hci_write_ssp_mode_sync(hdev, cp.val);
1993 
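	/* Note: on success the optimistically set flag is cleared again
	 * here, presumably so that set_ssp_complete (above) can observe
	 * the 0 -> 1 transition itself and emit New Settings.
	 */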
1994 	if (!err && changed)
1995 		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
1996 
1997 	return err;
1998 }
1999 
2000 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2001 {
2002 	struct mgmt_mode *cp = data;
2003 	struct mgmt_pending_cmd *cmd;
2004 	u8 status;
2005 	int err;
2006 
2007 	bt_dev_dbg(hdev, "sock %p", sk);
2008 
2009 	status = mgmt_bredr_support(hdev);
2010 	if (status)
2011 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2012 
2013 	if (!lmp_ssp_capable(hdev))
2014 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2015 				       MGMT_STATUS_NOT_SUPPORTED);
2016 
2017 	if (cp->val != 0x00 && cp->val != 0x01)
2018 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2019 				       MGMT_STATUS_INVALID_PARAMS);
2020 
2021 	hci_dev_lock(hdev);
2022 
2023 	if (!hdev_is_powered(hdev)) {
2024 		bool changed;
2025 
2026 		if (cp->val) {
2027 			changed = !hci_dev_test_and_set_flag(hdev,
2028 							     HCI_SSP_ENABLED);
2029 		} else {
2030 			changed = hci_dev_test_and_clear_flag(hdev,
2031 							      HCI_SSP_ENABLED);
2032 		}
2033 
2034 		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2035 		if (err < 0)
2036 			goto failed;
2037 
2038 		if (changed)
2039 			err = new_settings(hdev, sk);
2040 
2041 		goto failed;
2042 	}
2043 
2044 	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2045 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2046 				      MGMT_STATUS_BUSY);
2047 		goto failed;
2048 	}
2049 
2050 	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2051 		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2052 		goto failed;
2053 	}
2054 
2055 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2056 	if (!cmd)
2057 		err = -ENOMEM;
2058 	else
2059 		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2060 					 set_ssp_complete);
2061 
2062 	if (err < 0) {
2063 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2064 				      MGMT_STATUS_FAILED);
2065 
2066 		if (cmd)
2067 			mgmt_pending_remove(cmd);
2068 	}
2069 
2070 failed:
2071 	hci_dev_unlock(hdev);
2072 	return err;
2073 }
2074 
2075 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2076 {
2077 	bt_dev_dbg(hdev, "sock %p", sk);
2078 
2079 	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2080 				       MGMT_STATUS_NOT_SUPPORTED);
2081 }
2082 
2083 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2084 {
2085 	struct mgmt_pending_cmd *cmd = data;
2086 	struct cmd_lookup match = { NULL, hdev };
2087 	u8 status = mgmt_status(err);
2088 
2089 	bt_dev_dbg(hdev, "err %d", err);
2090 
2091 	if (err == -ECANCELED || !mgmt_pending_valid(hdev, data))
2092 		return;
2093 
2094 	if (status) {
2095 		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, status);
2096 		goto done;
2097 	}
2098 
2099 	settings_rsp(cmd, &match);
2100 
2101 	new_settings(hdev, match.sk);
2102 
2103 	if (match.sk)
2104 		sock_put(match.sk);
2105 
2106 done:
2107 	mgmt_pending_free(cmd);
2108 }
2109 
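/* Runs on the hci_sync queue: snapshot the requested LE state under
 * mgmt_pending_lock, tear advertising down when disabling, and refresh
 * the advertising/scan response data once LE has been enabled.
 */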
2110 static int set_le_sync(struct hci_dev *hdev, void *data)
2111 {
2112 	struct mgmt_pending_cmd *cmd = data;
2113 	struct mgmt_mode cp;
2114 	u8 val;
2115 	int err;
2116 
2117 	mutex_lock(&hdev->mgmt_pending_lock);
2118 
2119 	if (!__mgmt_pending_listed(hdev, cmd)) {
2120 		mutex_unlock(&hdev->mgmt_pending_lock);
2121 		return -ECANCELED;
2122 	}
2123 
2124 	memcpy(&cp, cmd->param, sizeof(cp));
2125 	val = !!cp.val;
2126 
2127 	mutex_unlock(&hdev->mgmt_pending_lock);
2128 
2129 	if (!val) {
2130 		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2131 
2132 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2133 			hci_disable_advertising_sync(hdev);
2134 
2135 		if (ext_adv_capable(hdev))
2136 			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2137 	} else {
2138 		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2139 	}
2140 
2141 	err = hci_write_le_host_supported_sync(hdev, val, 0);
2142 
2143 	/* Make sure the controller has a good default for
2144 	 * advertising data. Restrict the update to when LE
2145 	 * has actually been enabled. During power on, the
2146 	 * update in powered_update_hci will take care of it.
2147 	 */
2148 	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2149 		if (ext_adv_capable(hdev)) {
2150 			int status;
2151 
2152 			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2153 			if (!status)
2154 				hci_update_scan_rsp_data_sync(hdev, 0x00);
2155 		} else {
2156 			hci_update_adv_data_sync(hdev, 0x00);
2157 			hci_update_scan_rsp_data_sync(hdev, 0x00);
2158 		}
2159 
2160 		hci_update_passive_scan(hdev);
2161 	}
2162 
2163 	return err;
2164 }
2165 
2166 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2167 {
2168 	struct mgmt_pending_cmd *cmd = data;
2169 	u8 status = mgmt_status(err);
2170 	struct sock *sk;
2171 
2172 	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
2173 		return;
2174 
2175 	sk = cmd->sk;
2176 
2177 	if (status) {
2178 		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev, true,
2179 				     cmd_status_rsp, &status);
2180 		return;
2181 	}
2182 
2183 	mgmt_pending_remove(cmd);
2184 	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
2185 }
2186 
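/* Apply the Set Mesh Receiver parameters: copy the request under
 * mgmt_pending_lock, toggle HCI_MESH, update the scan interval/window
 * and (when they fit) the AD type filter list, then re-program passive
 * scanning.
 */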
2187 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2188 {
2189 	struct mgmt_pending_cmd *cmd = data;
2190 	struct mgmt_cp_set_mesh cp;
2191 	size_t len;
2192 
2193 	mutex_lock(&hdev->mgmt_pending_lock);
2194 
2195 	if (!__mgmt_pending_listed(hdev, cmd)) {
2196 		mutex_unlock(&hdev->mgmt_pending_lock);
2197 		return -ECANCELED;
2198 	}
2199 
2200 	memcpy(&cp, cmd->param, sizeof(cp));
2201 
2202 	mutex_unlock(&hdev->mgmt_pending_lock);
2203 
2204 	len = cmd->param_len;
2205 
2206 	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2207 
2208 	if (cp.enable)
2209 		hci_dev_set_flag(hdev, HCI_MESH);
2210 	else
2211 		hci_dev_clear_flag(hdev, HCI_MESH);
2212 
2213 	hdev->le_scan_interval = __le16_to_cpu(cp.period);
2214 	hdev->le_scan_window = __le16_to_cpu(cp.window);
2215 
2216 	len -= sizeof(cp);
2217 
2218 	/* If filters don't fit, forward all adv pkts */
2219 	if (len <= sizeof(hdev->mesh_ad_types))
2220 		memcpy(hdev->mesh_ad_types, cp.ad_types, len);
2221 
2222 	hci_update_passive_scan_sync(hdev);
2223 	return 0;
2224 }
2225 
2226 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2227 {
2228 	struct mgmt_cp_set_mesh *cp = data;
2229 	struct mgmt_pending_cmd *cmd;
2230 	__u16 period, window;
2231 	int err = 0;
2232 
2233 	bt_dev_dbg(hdev, "sock %p", sk);
2234 
2235 	if (!lmp_le_capable(hdev) ||
2236 	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2237 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2238 				       MGMT_STATUS_NOT_SUPPORTED);
2239 
2240 	if (cp->enable != 0x00 && cp->enable != 0x01)
2241 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2242 				       MGMT_STATUS_INVALID_PARAMS);
2243 
2244 	/* Keep allowed ranges in sync with set_scan_params() */
2245 	period = __le16_to_cpu(cp->period);
2246 
2247 	if (period < 0x0004 || period > 0x4000)
2248 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2249 				       MGMT_STATUS_INVALID_PARAMS);
2250 
2251 	window = __le16_to_cpu(cp->window);
2252 
2253 	if (window < 0x0004 || window > 0x4000)
2254 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2255 				       MGMT_STATUS_INVALID_PARAMS);
2256 
2257 	if (window > period)
2258 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2259 				       MGMT_STATUS_INVALID_PARAMS);
2260 
2261 	hci_dev_lock(hdev);
2262 
2263 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2264 	if (!cmd)
2265 		err = -ENOMEM;
2266 	else
2267 		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2268 					 set_mesh_complete);
2269 
2270 	if (err < 0) {
2271 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2272 				      MGMT_STATUS_FAILED);
2273 
2274 		if (cmd)
2275 			mgmt_pending_remove(cmd);
2276 	}
2277 
2278 	hci_dev_unlock(hdev);
2279 	return err;
2280 }
2281 
2282 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2283 {
2284 	struct mgmt_mesh_tx *mesh_tx = data;
2285 	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2286 	unsigned long mesh_send_interval;
2287 	u8 mgmt_err = mgmt_status(err);
2288 
2289 	/* Report any errors here, but don't report completion */
2290 
2291 	if (mgmt_err) {
2292 		hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2293 		/* Send Complete Error Code for handle */
2294 		mesh_send_complete(hdev, mesh_tx, false);
2295 		return;
2296 	}
2297 
2298 	mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2299 	queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2300 			   mesh_send_interval);
2301 }
2302 
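/* Transmit one queued mesh packet by creating a dedicated advertising
 * instance (one past le_num_of_adv_sets) and scheduling it; the
 * duration is derived from the requested count and the maximum
 * advertising interval.
 */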
2303 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2304 {
2305 	struct mgmt_mesh_tx *mesh_tx = data;
2306 	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2307 	struct adv_info *adv, *next_instance;
2308 	u8 instance = hdev->le_num_of_adv_sets + 1;
2309 	u16 timeout, duration;
2310 	int err = 0;
2311 
2312 	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2313 		return MGMT_STATUS_BUSY;
2314 
2315 	timeout = 1000;
2316 	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2317 	adv = hci_add_adv_instance(hdev, instance, 0,
2318 				   send->adv_data_len, send->adv_data,
2319 				   0, NULL,
2320 				   timeout, duration,
2321 				   HCI_ADV_TX_POWER_NO_PREFERENCE,
2322 				   hdev->le_adv_min_interval,
2323 				   hdev->le_adv_max_interval,
2324 				   mesh_tx->handle);
2325 
2326 	if (!IS_ERR(adv))
2327 		mesh_tx->instance = instance;
2328 	else
2329 		err = PTR_ERR(adv);
2330 
2331 	if (hdev->cur_adv_instance == instance) {
2332 		/* If the currently advertised instance is being changed then
2333 		 * cancel the current advertising and schedule the next
2334 		 * instance. If there is only one instance then the overridden
2335 		 * advertising data will be visible right away.
2336 		 */
2337 		cancel_adv_timeout(hdev);
2338 
2339 		next_instance = hci_get_next_instance(hdev, instance);
2340 		if (next_instance)
2341 			instance = next_instance->instance;
2342 		else
2343 			instance = 0;
2344 	} else if (hdev->adv_instance_timeout) {
2345 		/* Immediately advertise the new instance if none is active, or
2346 		 * let it go naturally from the queue if advertising is running.
2347 		 */
2348 		instance = 0;
2349 	}
2350 
2351 	if (instance)
2352 		return hci_schedule_adv_instance_sync(hdev, instance, true);
2353 
2354 	return err;
2355 }
2356 
2357 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2358 {
2359 	struct mgmt_rp_mesh_read_features *rp = data;
2360 
2361 	if (rp->used_handles >= rp->max_handles)
2362 		return;
2363 
2364 	rp->handles[rp->used_handles++] = mesh_tx->handle;
2365 }
2366 
2367 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2368 			 void *data, u16 len)
2369 {
2370 	struct mgmt_rp_mesh_read_features rp;
2371 
2372 	if (!lmp_le_capable(hdev) ||
2373 	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2374 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2375 				       MGMT_STATUS_NOT_SUPPORTED);
2376 
2377 	memset(&rp, 0, sizeof(rp));
2378 	rp.index = cpu_to_le16(hdev->id);
2379 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2380 		rp.max_handles = MESH_HANDLES_MAX;
2381 
2382 	hci_dev_lock(hdev);
2383 
2384 	if (rp.max_handles)
2385 		mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2386 
2387 	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2388 			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2389 
2390 	hci_dev_unlock(hdev);
2391 	return 0;
2392 }
2393 
2394 static int send_cancel(struct hci_dev *hdev, void *data)
2395 {
2396 	struct mgmt_pending_cmd *cmd = data;
2397 	struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2398 	struct mgmt_mesh_tx *mesh_tx;
2399 
2400 	if (!cancel->handle) {
2401 		do {
2402 			mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2403 
2404 			if (mesh_tx)
2405 				mesh_send_complete(hdev, mesh_tx, false);
2406 		} while (mesh_tx);
2407 	} else {
2408 		mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2409 
2410 		if (mesh_tx && mesh_tx->sk == cmd->sk)
2411 			mesh_send_complete(hdev, mesh_tx, false);
2412 	}
2413 
2414 	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2415 			  0, NULL, 0);
2416 	mgmt_pending_free(cmd);
2417 
2418 	return 0;
2419 }
2420 
2421 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2422 			    void *data, u16 len)
2423 {
2424 	struct mgmt_pending_cmd *cmd;
2425 	int err;
2426 
2427 	if (!lmp_le_capable(hdev) ||
2428 	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2429 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2430 				       MGMT_STATUS_NOT_SUPPORTED);
2431 
2432 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2433 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2434 				       MGMT_STATUS_REJECTED);
2435 
2436 	hci_dev_lock(hdev);
2437 	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2438 	if (!cmd)
2439 		err = -ENOMEM;
2440 	else
2441 		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2442 
2443 	if (err < 0) {
2444 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2445 				      MGMT_STATUS_FAILED);
2446 
2447 		if (cmd)
2448 			mgmt_pending_free(cmd);
2449 	}
2450 
2451 	hci_dev_unlock(hdev);
2452 	return err;
2453 }
2454 
2455 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2456 {
2457 	struct mgmt_mesh_tx *mesh_tx;
2458 	struct mgmt_cp_mesh_send *send = data;
2459 	struct mgmt_rp_mesh_read_features rp;
2460 	bool sending;
2461 	int err = 0;
2462 
2463 	if (!lmp_le_capable(hdev) ||
2464 	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2465 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2466 				       MGMT_STATUS_NOT_SUPPORTED);
2467 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2468 	    len <= MGMT_MESH_SEND_SIZE ||
2469 	    len > (MGMT_MESH_SEND_SIZE + 31))
2470 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2471 				       MGMT_STATUS_REJECTED);
2472 
2473 	hci_dev_lock(hdev);
2474 
2475 	memset(&rp, 0, sizeof(rp));
2476 	rp.max_handles = MESH_HANDLES_MAX;
2477 
2478 	mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2479 
2480 	if (rp.max_handles <= rp.used_handles) {
2481 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2482 				      MGMT_STATUS_BUSY);
2483 		goto done;
2484 	}
2485 
2486 	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2487 	mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2488 
2489 	if (!mesh_tx)
2490 		err = -ENOMEM;
2491 	else if (!sending)
2492 		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2493 					 mesh_send_start_complete);
2494 
2495 	if (err < 0) {
2496 		bt_dev_err(hdev, "Send Mesh Failed %d", err);
2497 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2498 				      MGMT_STATUS_FAILED);
2499 
2500 		if (mesh_tx) {
2501 			if (sending)
2502 				mgmt_mesh_remove(mesh_tx);
2503 		}
2504 	} else {
2505 		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2506 
2507 		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2508 				  &mesh_tx->handle, 1);
2509 	}
2510 
2511 done:
2512 	hci_dev_unlock(hdev);
2513 	return err;
2514 }
2515 
2516 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2517 {
2518 	struct mgmt_mode *cp = data;
2519 	struct mgmt_pending_cmd *cmd;
2520 	int err;
2521 	u8 val, enabled;
2522 
2523 	bt_dev_dbg(hdev, "sock %p", sk);
2524 
2525 	if (!lmp_le_capable(hdev))
2526 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2527 				       MGMT_STATUS_NOT_SUPPORTED);
2528 
2529 	if (cp->val != 0x00 && cp->val != 0x01)
2530 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2531 				       MGMT_STATUS_INVALID_PARAMS);
2532 
2533 	/* Bluetooth single-mode LE-only controllers, or dual-mode
2534 	 * controllers configured as LE-only devices, do not allow
2535 	 * switching LE off. These either have LE enabled explicitly,
2536 	 * or BR/EDR has been switched off previously.
2537 	 *
2538 	 * When trying to enable an already enabled LE, gracefully
2539 	 * send a positive response. Trying to disable it, however,
2540 	 * will result in rejection.
2541 	 */
2542 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2543 		if (cp->val == 0x01)
2544 			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2545 
2546 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2547 				       MGMT_STATUS_REJECTED);
2548 	}
2549 
2550 	hci_dev_lock(hdev);
2551 
2552 	val = !!cp->val;
2553 	enabled = lmp_host_le_capable(hdev);
2554 
2555 	if (!hdev_is_powered(hdev) || val == enabled) {
2556 		bool changed = false;
2557 
2558 		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2559 			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2560 			changed = true;
2561 		}
2562 
2563 		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2564 			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2565 			changed = true;
2566 		}
2567 
2568 		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2569 		if (err < 0)
2570 			goto unlock;
2571 
2572 		if (changed)
2573 			err = new_settings(hdev, sk);
2574 
2575 		goto unlock;
2576 	}
2577 
2578 	if (pending_find(MGMT_OP_SET_LE, hdev) ||
2579 	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2580 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2581 				      MGMT_STATUS_BUSY);
2582 		goto unlock;
2583 	}
2584 
2585 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2586 	if (!cmd)
2587 		err = -ENOMEM;
2588 	else
2589 		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2590 					 set_le_complete);
2591 
2592 	if (err < 0) {
2593 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2594 				      MGMT_STATUS_FAILED);
2595 
2596 		if (cmd)
2597 			mgmt_pending_remove(cmd);
2598 	}
2599 
2600 unlock:
2601 	hci_dev_unlock(hdev);
2602 	return err;
2603 }
2604 
2605 static int send_hci_cmd_sync(struct hci_dev *hdev, void *data)
2606 {
2607 	struct mgmt_pending_cmd *cmd = data;
2608 	struct mgmt_cp_hci_cmd_sync *cp = cmd->param;
2609 	struct sk_buff *skb;
2610 
2611 	skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cp->opcode),
2612 				le16_to_cpu(cp->params_len), cp->params,
2613 				cp->event, cp->timeout ?
2614 				secs_to_jiffies(cp->timeout) :
2615 				HCI_CMD_TIMEOUT);
2616 	if (IS_ERR(skb)) {
2617 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2618 				mgmt_status(PTR_ERR(skb)));
2619 		goto done;
2620 	}
2621 
2622 	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC, 0,
2623 			  skb->data, skb->len);
2624 
2625 	kfree_skb(skb);
2626 
2627 done:
2628 	mgmt_pending_free(cmd);
2629 
2630 	return 0;
2631 }
2632 
2633 static int mgmt_hci_cmd_sync(struct sock *sk, struct hci_dev *hdev,
2634 			     void *data, u16 len)
2635 {
2636 	struct mgmt_cp_hci_cmd_sync *cp = data;
2637 	struct mgmt_pending_cmd *cmd;
2638 	int err;
2639 
2640 	if (len != (offsetof(struct mgmt_cp_hci_cmd_sync, params) +
2641 		    le16_to_cpu(cp->params_len)))
2642 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2643 				       MGMT_STATUS_INVALID_PARAMS);
2644 
2645 	hci_dev_lock(hdev);
2646 	cmd = mgmt_pending_new(sk, MGMT_OP_HCI_CMD_SYNC, hdev, data, len);
2647 	if (!cmd)
2648 		err = -ENOMEM;
2649 	else
2650 		err = hci_cmd_sync_queue(hdev, send_hci_cmd_sync, cmd, NULL);
2651 
2652 	if (err < 0) {
2653 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_HCI_CMD_SYNC,
2654 				      MGMT_STATUS_FAILED);
2655 
2656 		if (cmd)
2657 			mgmt_pending_free(cmd);
2658 	}
2659 
2660 	hci_dev_unlock(hdev);
2661 	return err;
2662 }
2663 
2664 /* This is a helper function to test for pending mgmt commands that can
2665  * cause CoD or EIR HCI commands. We can only allow one such pending
2666  * mgmt command at a time since otherwise we cannot easily track the
2667  * current and future values, and based on that calculate whether a new
2668  * HCI command needs to be sent and, if so, with what value.
2669  */
2670 static bool pending_eir_or_class(struct hci_dev *hdev)
2671 {
2672 	struct mgmt_pending_cmd *cmd;
2673 
2674 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2675 		switch (cmd->opcode) {
2676 		case MGMT_OP_ADD_UUID:
2677 		case MGMT_OP_REMOVE_UUID:
2678 		case MGMT_OP_SET_DEV_CLASS:
2679 		case MGMT_OP_SET_POWERED:
2680 			return true;
2681 		}
2682 	}
2683 
2684 	return false;
2685 }
2686 
2687 static const u8 bluetooth_base_uuid[] = {
2688 			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2689 			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2690 };
2691 
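/* Infer the smallest representation of a UUID: anything not based on
 * the Bluetooth Base UUID needs the full 128 bits, otherwise 32 or 16
 * bits depending on whether the value exceeds 0xffff.
 */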
2692 static u8 get_uuid_size(const u8 *uuid)
2693 {
2694 	u32 val;
2695 
2696 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2697 		return 128;
2698 
2699 	val = get_unaligned_le32(&uuid[12]);
2700 	if (val > 0xffff)
2701 		return 32;
2702 
2703 	return 16;
2704 }
2705 
2706 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2707 {
2708 	struct mgmt_pending_cmd *cmd = data;
2709 
2710 	bt_dev_dbg(hdev, "err %d", err);
2711 
2712 	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
2713 			  mgmt_status(err), hdev->dev_class, 3);
2714 
2715 	mgmt_pending_free(cmd);
2716 }
2717 
2718 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2719 {
2720 	int err;
2721 
2722 	err = hci_update_class_sync(hdev);
2723 	if (err)
2724 		return err;
2725 
2726 	return hci_update_eir_sync(hdev);
2727 }
2728 
2729 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2730 {
2731 	struct mgmt_cp_add_uuid *cp = data;
2732 	struct mgmt_pending_cmd *cmd;
2733 	struct bt_uuid *uuid;
2734 	int err;
2735 
2736 	bt_dev_dbg(hdev, "sock %p", sk);
2737 
2738 	hci_dev_lock(hdev);
2739 
2740 	if (pending_eir_or_class(hdev)) {
2741 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2742 				      MGMT_STATUS_BUSY);
2743 		goto failed;
2744 	}
2745 
2746 	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2747 	if (!uuid) {
2748 		err = -ENOMEM;
2749 		goto failed;
2750 	}
2751 
2752 	memcpy(uuid->uuid, cp->uuid, 16);
2753 	uuid->svc_hint = cp->svc_hint;
2754 	uuid->size = get_uuid_size(cp->uuid);
2755 
2756 	list_add_tail(&uuid->list, &hdev->uuids);
2757 
2758 	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2759 	if (!cmd) {
2760 		err = -ENOMEM;
2761 		goto failed;
2762 	}
2763 
2764 	/* MGMT_OP_ADD_UUID doesn't require the adapter to be up/running,
2765 	 * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2766 	 */
2767 	err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
2768 				  mgmt_class_complete);
2769 	if (err < 0) {
2770 		mgmt_pending_free(cmd);
2771 		goto failed;
2772 	}
2773 
2774 failed:
2775 	hci_dev_unlock(hdev);
2776 	return err;
2777 }
2778 
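/* Arm the service cache timer so EIR/class updates are batched; returns
 * true only when the cache was newly enabled on a powered adapter.
 */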
2779 static bool enable_service_cache(struct hci_dev *hdev)
2780 {
2781 	if (!hdev_is_powered(hdev))
2782 		return false;
2783 
2784 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2785 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2786 				   CACHE_TIMEOUT);
2787 		return true;
2788 	}
2789 
2790 	return false;
2791 }
2792 
2793 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2794 {
2795 	int err;
2796 
2797 	err = hci_update_class_sync(hdev);
2798 	if (err)
2799 		return err;
2800 
2801 	return hci_update_eir_sync(hdev);
2802 }
2803 
2804 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2805 		       u16 len)
2806 {
2807 	struct mgmt_cp_remove_uuid *cp = data;
2808 	struct mgmt_pending_cmd *cmd;
2809 	struct bt_uuid *match, *tmp;
2810 	static const u8 bt_uuid_any[] = {
2811 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2812 	};
2813 	int err, found;
2814 
2815 	bt_dev_dbg(hdev, "sock %p", sk);
2816 
2817 	hci_dev_lock(hdev);
2818 
2819 	if (pending_eir_or_class(hdev)) {
2820 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2821 				      MGMT_STATUS_BUSY);
2822 		goto unlock;
2823 	}
2824 
2825 	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2826 		hci_uuids_clear(hdev);
2827 
2828 		if (enable_service_cache(hdev)) {
2829 			err = mgmt_cmd_complete(sk, hdev->id,
2830 						MGMT_OP_REMOVE_UUID,
2831 						0, hdev->dev_class, 3);
2832 			goto unlock;
2833 		}
2834 
2835 		goto update_class;
2836 	}
2837 
2838 	found = 0;
2839 
2840 	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2841 		if (memcmp(match->uuid, cp->uuid, 16) != 0)
2842 			continue;
2843 
2844 		list_del(&match->list);
2845 		kfree(match);
2846 		found++;
2847 	}
2848 
2849 	if (found == 0) {
2850 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2851 				      MGMT_STATUS_INVALID_PARAMS);
2852 		goto unlock;
2853 	}
2854 
2855 update_class:
2856 	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2857 	if (!cmd) {
2858 		err = -ENOMEM;
2859 		goto unlock;
2860 	}
2861 
2862 	/* MGMT_OP_REMOVE_UUID doesn't require the adapter to be up/running,
2863 	 * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2864 	 */
2865 	err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
2866 				  mgmt_class_complete);
2867 	if (err < 0)
2868 		mgmt_pending_free(cmd);
2869 
2870 unlock:
2871 	hci_dev_unlock(hdev);
2872 	return err;
2873 }
2874 
2875 static int set_class_sync(struct hci_dev *hdev, void *data)
2876 {
2877 	int err = 0;
2878 
2879 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2880 		cancel_delayed_work_sync(&hdev->service_cache);
2881 		err = hci_update_eir_sync(hdev);
2882 	}
2883 
2884 	if (err)
2885 		return err;
2886 
2887 	return hci_update_class_sync(hdev);
2888 }
2889 
2890 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2891 			 u16 len)
2892 {
2893 	struct mgmt_cp_set_dev_class *cp = data;
2894 	struct mgmt_pending_cmd *cmd;
2895 	int err;
2896 
2897 	bt_dev_dbg(hdev, "sock %p", sk);
2898 
2899 	if (!lmp_bredr_capable(hdev))
2900 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2901 				       MGMT_STATUS_NOT_SUPPORTED);
2902 
2903 	hci_dev_lock(hdev);
2904 
2905 	if (pending_eir_or_class(hdev)) {
2906 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2907 				      MGMT_STATUS_BUSY);
2908 		goto unlock;
2909 	}
2910 
2911 	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2912 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2913 				      MGMT_STATUS_INVALID_PARAMS);
2914 		goto unlock;
2915 	}
2916 
2917 	hdev->major_class = cp->major;
2918 	hdev->minor_class = cp->minor;
2919 
2920 	if (!hdev_is_powered(hdev)) {
2921 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2922 					hdev->dev_class, 3);
2923 		goto unlock;
2924 	}
2925 
2926 	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2927 	if (!cmd) {
2928 		err = -ENOMEM;
2929 		goto unlock;
2930 	}
2931 
2932 	/* MGMT_OP_SET_DEV_CLASS doesn't require the adapter to be up/running,
2933 	 * so use hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2934 	 */
2935 	err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
2936 				  mgmt_class_complete);
2937 	if (err < 0)
2938 		mgmt_pending_free(cmd);
2939 
2940 unlock:
2941 	hci_dev_unlock(hdev);
2942 	return err;
2943 }
2944 
2945 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2946 			  u16 len)
2947 {
2948 	struct mgmt_cp_load_link_keys *cp = data;
2949 	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2950 				   sizeof(struct mgmt_link_key_info));
2951 	u16 key_count, expected_len;
2952 	bool changed;
2953 	int i;
2954 
2955 	bt_dev_dbg(hdev, "sock %p", sk);
2956 
2957 	if (!lmp_bredr_capable(hdev))
2958 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2959 				       MGMT_STATUS_NOT_SUPPORTED);
2960 
2961 	key_count = __le16_to_cpu(cp->key_count);
2962 	if (key_count > max_key_count) {
2963 		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2964 			   key_count);
2965 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2966 				       MGMT_STATUS_INVALID_PARAMS);
2967 	}
2968 
2969 	expected_len = struct_size(cp, keys, key_count);
2970 	if (expected_len != len) {
2971 		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2972 			   expected_len, len);
2973 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2974 				       MGMT_STATUS_INVALID_PARAMS);
2975 	}
2976 
2977 	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2978 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2979 				       MGMT_STATUS_INVALID_PARAMS);
2980 
2981 	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2982 		   key_count);
2983 
2984 	hci_dev_lock(hdev);
2985 
2986 	hci_link_keys_clear(hdev);
2987 
2988 	if (cp->debug_keys)
2989 		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2990 	else
2991 		changed = hci_dev_test_and_clear_flag(hdev,
2992 						      HCI_KEEP_DEBUG_KEYS);
2993 
2994 	if (changed)
2995 		new_settings(hdev, NULL);
2996 
2997 	for (i = 0; i < key_count; i++) {
2998 		struct mgmt_link_key_info *key = &cp->keys[i];
2999 
3000 		if (hci_is_blocked_key(hdev,
3001 				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
3002 				       key->val)) {
3003 			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
3004 				    &key->addr.bdaddr);
3005 			continue;
3006 		}
3007 
3008 		if (key->addr.type != BDADDR_BREDR) {
3009 			bt_dev_warn(hdev,
3010 				    "Invalid link address type %u for %pMR",
3011 				    key->addr.type, &key->addr.bdaddr);
3012 			continue;
3013 		}
3014 
3015 		if (key->type > 0x08) {
3016 			bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
3017 				    key->type, &key->addr.bdaddr);
3018 			continue;
3019 		}
3020 
3021 		/* Always ignore debug keys and require a new pairing if
3022 		 * the user wants to use them.
3023 		 */
3024 		if (key->type == HCI_LK_DEBUG_COMBINATION)
3025 			continue;
3026 
3027 		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
3028 				 key->type, key->pin_len, NULL);
3029 	}
3030 
3031 	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
3032 
3033 	hci_dev_unlock(hdev);
3034 
3035 	return 0;
3036 }
3037 
3038 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
3039 			   u8 addr_type, struct sock *skip_sk)
3040 {
3041 	struct mgmt_ev_device_unpaired ev;
3042 
3043 	bacpy(&ev.addr.bdaddr, bdaddr);
3044 	ev.addr.type = addr_type;
3045 
3046 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
3047 			  skip_sk);
3048 }
3049 
3050 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
3051 {
3052 	struct mgmt_pending_cmd *cmd = data;
3053 	struct mgmt_cp_unpair_device *cp = cmd->param;
3054 
3055 	if (!err)
3056 		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
3057 
3058 	cmd->cmd_complete(cmd, err);
3059 	mgmt_pending_free(cmd);
3060 }
3061 
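/* Runs on the hci_sync queue: if the device is still connected, request
 * an orderly termination of the link; key removal has already been done
 * by unpair_device().
 */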
3062 static int unpair_device_sync(struct hci_dev *hdev, void *data)
3063 {
3064 	struct mgmt_pending_cmd *cmd = data;
3065 	struct mgmt_cp_unpair_device *cp = cmd->param;
3066 	struct hci_conn *conn;
3067 
3068 	if (cp->addr.type == BDADDR_BREDR)
3069 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3070 					       &cp->addr.bdaddr);
3071 	else
3072 		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3073 					       le_addr_type(cp->addr.type));
3074 
3075 	if (!conn)
3076 		return 0;
3077 
3078 	/* Disregard any possible error since the likes of hci_abort_conn_sync
3079 	 * will clean up the connection no matter the error.
3080 	 */
3081 	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3082 
3083 	return 0;
3084 }
3085 
3086 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3087 			 u16 len)
3088 {
3089 	struct mgmt_cp_unpair_device *cp = data;
3090 	struct mgmt_rp_unpair_device rp;
3091 	struct hci_conn_params *params;
3092 	struct mgmt_pending_cmd *cmd;
3093 	struct hci_conn *conn;
3094 	u8 addr_type;
3095 	int err;
3096 
3097 	memset(&rp, 0, sizeof(rp));
3098 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3099 	rp.addr.type = cp->addr.type;
3100 
3101 	if (!bdaddr_type_is_valid(cp->addr.type))
3102 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3103 					 MGMT_STATUS_INVALID_PARAMS,
3104 					 &rp, sizeof(rp));
3105 
3106 	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
3107 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3108 					 MGMT_STATUS_INVALID_PARAMS,
3109 					 &rp, sizeof(rp));
3110 
3111 	hci_dev_lock(hdev);
3112 
3113 	if (!hdev_is_powered(hdev)) {
3114 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3115 					MGMT_STATUS_NOT_POWERED, &rp,
3116 					sizeof(rp));
3117 		goto unlock;
3118 	}
3119 
3120 	if (cp->addr.type == BDADDR_BREDR) {
3121 		/* If disconnection is requested, then look up the
3122 		 * connection. If the remote device is connected, the
3123 		 * connection will later be used to terminate the link.
3124 		 *
3125 		 * Leaving it set to NULL explicitly means the link
3126 		 * will not be terminated.
3127 		 */
3128 		if (cp->disconnect)
3129 			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3130 						       &cp->addr.bdaddr);
3131 		else
3132 			conn = NULL;
3133 
3134 		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
3135 		if (err < 0) {
3136 			err = mgmt_cmd_complete(sk, hdev->id,
3137 						MGMT_OP_UNPAIR_DEVICE,
3138 						MGMT_STATUS_NOT_PAIRED, &rp,
3139 						sizeof(rp));
3140 			goto unlock;
3141 		}
3142 
3143 		goto done;
3144 	}
3145 
3146 	/* LE address type */
3147 	addr_type = le_addr_type(cp->addr.type);
3148 
3149 	/* Abort any ongoing SMP pairing. Removes the LTK and IRK if they exist. */
3150 	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3151 	if (err < 0) {
3152 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3153 					MGMT_STATUS_NOT_PAIRED, &rp,
3154 					sizeof(rp));
3155 		goto unlock;
3156 	}
3157 
3158 	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3159 	if (!conn) {
3160 		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3161 		goto done;
3162 	}
3163 
3164 
3166 	 * give a chance of keeping them if a repairing happens.
3167 	 */
3168 	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3169 
3170 	/* Disable auto-connection parameters if present */
3171 	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3172 	if (params) {
3173 		if (params->explicit_connect)
3174 			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3175 		else
3176 			params->auto_connect = HCI_AUTO_CONN_DISABLED;
3177 	}
3178 
3179 	/* If disconnection is not requested, then clear the connection
3180 	 * variable so that the link is not terminated.
3181 	 */
3182 	if (!cp->disconnect)
3183 		conn = NULL;
3184 
3185 done:
3186 	/* If the connection variable is set, then termination of the
3187 	 * link is requested.
3188 	 */
3189 	if (!conn) {
3190 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3191 					&rp, sizeof(rp));
3192 		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3193 		goto unlock;
3194 	}
3195 
3196 	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3197 			       sizeof(*cp));
3198 	if (!cmd) {
3199 		err = -ENOMEM;
3200 		goto unlock;
3201 	}
3202 
3203 	cmd->cmd_complete = addr_cmd_complete;
3204 
3205 	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3206 				 unpair_device_complete);
3207 	if (err < 0)
3208 		mgmt_pending_free(cmd);
3209 
3210 unlock:
3211 	hci_dev_unlock(hdev);
3212 	return err;
3213 }
3214 
3215 static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
3216 {
3217 	struct mgmt_pending_cmd *cmd = data;
3218 
3219 	cmd->cmd_complete(cmd, mgmt_status(err));
3220 	mgmt_pending_free(cmd);
3221 }
3222 
3223 static int disconnect_sync(struct hci_dev *hdev, void *data)
3224 {
3225 	struct mgmt_pending_cmd *cmd = data;
3226 	struct mgmt_cp_disconnect *cp = cmd->param;
3227 	struct hci_conn *conn;
3228 
3229 	if (cp->addr.type == BDADDR_BREDR)
3230 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3231 					       &cp->addr.bdaddr);
3232 	else
3233 		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3234 					       le_addr_type(cp->addr.type));
3235 
3236 	if (!conn)
3237 		return -ENOTCONN;
3238 
3239 	/* Disregard any possible error since the likes of hci_abort_conn_sync
3240 	 * will clean up the connection no matter the error.
3241 	 */
3242 	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3243 
3244 	return 0;
3245 }
3246 
3247 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3248 		      u16 len)
3249 {
3250 	struct mgmt_cp_disconnect *cp = data;
3251 	struct mgmt_rp_disconnect rp;
3252 	struct mgmt_pending_cmd *cmd;
3253 	int err;
3254 
3255 	bt_dev_dbg(hdev, "sock %p", sk);
3256 
3257 	memset(&rp, 0, sizeof(rp));
3258 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3259 	rp.addr.type = cp->addr.type;
3260 
3261 	if (!bdaddr_type_is_valid(cp->addr.type))
3262 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3263 					 MGMT_STATUS_INVALID_PARAMS,
3264 					 &rp, sizeof(rp));
3265 
3266 	hci_dev_lock(hdev);
3267 
3268 	if (!test_bit(HCI_UP, &hdev->flags)) {
3269 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3270 					MGMT_STATUS_NOT_POWERED, &rp,
3271 					sizeof(rp));
3272 		goto failed;
3273 	}
3274 
3275 	cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3276 	if (!cmd) {
3277 		err = -ENOMEM;
3278 		goto failed;
3279 	}
3280 
3281 	cmd->cmd_complete = generic_cmd_complete;
3282 
3283 	err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
3284 				 disconnect_complete);
3285 	if (err < 0)
3286 		mgmt_pending_free(cmd);
3287 
3288 failed:
3289 	hci_dev_unlock(hdev);
3290 	return err;
3291 }
3292 
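/* Translate an HCI link type and address type into the mgmt BDADDR_*
 * address type; LE and ISO links map to LE public/random, everything
 * else falls back to BR/EDR.
 */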
3293 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3294 {
3295 	switch (link_type) {
3296 	case CIS_LINK:
3297 	case BIS_LINK:
3298 	case PA_LINK:
3299 	case LE_LINK:
3300 		switch (addr_type) {
3301 		case ADDR_LE_DEV_PUBLIC:
3302 			return BDADDR_LE_PUBLIC;
3303 
3304 		default:
3305 			/* Fallback to LE Random address type */
3306 			return BDADDR_LE_RANDOM;
3307 		}
3308 
3309 	default:
3310 		/* Fallback to BR/EDR type */
3311 		return BDADDR_BREDR;
3312 	}
3313 }
3314 
3315 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3316 			   u16 data_len)
3317 {
3318 	struct mgmt_rp_get_connections *rp;
3319 	struct hci_conn *c;
3320 	int err;
3321 	u16 i;
3322 
3323 	bt_dev_dbg(hdev, "sock %p", sk);
3324 
3325 	hci_dev_lock(hdev);
3326 
3327 	if (!hdev_is_powered(hdev)) {
3328 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3329 				      MGMT_STATUS_NOT_POWERED);
3330 		goto unlock;
3331 	}
3332 
3333 	i = 0;
3334 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
3335 		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3336 			i++;
3337 	}
3338 
3339 	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3340 	if (!rp) {
3341 		err = -ENOMEM;
3342 		goto unlock;
3343 	}
3344 
3345 	i = 0;
3346 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
3347 		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3348 			continue;
3349 		bacpy(&rp->addr[i].bdaddr, &c->dst);
3350 		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3351 		if (c->type == SCO_LINK || c->type == ESCO_LINK)
3352 			continue;
3353 		i++;
3354 	}
3355 
3356 	rp->conn_count = cpu_to_le16(i);
3357 
3358 	/* Recalculate length in case of filtered SCO connections, etc */
3359 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3360 				struct_size(rp, addr, i));
3361 
3362 	kfree(rp);
3363 
3364 unlock:
3365 	hci_dev_unlock(hdev);
3366 	return err;
3367 }
3368 
3369 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3370 				   struct mgmt_cp_pin_code_neg_reply *cp)
3371 {
3372 	struct mgmt_pending_cmd *cmd;
3373 	int err;
3374 
3375 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3376 			       sizeof(*cp));
3377 	if (!cmd)
3378 		return -ENOMEM;
3379 
3380 	cmd->cmd_complete = addr_cmd_complete;
3381 
3382 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3383 			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3384 	if (err < 0)
3385 		mgmt_pending_remove(cmd);
3386 
3387 	return err;
3388 }
3389 
3390 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3391 			  u16 len)
3392 {
3393 	struct hci_conn *conn;
3394 	struct mgmt_cp_pin_code_reply *cp = data;
3395 	struct hci_cp_pin_code_reply reply;
3396 	struct mgmt_pending_cmd *cmd;
3397 	int err;
3398 
3399 	bt_dev_dbg(hdev, "sock %p", sk);
3400 
3401 	hci_dev_lock(hdev);
3402 
3403 	if (!hdev_is_powered(hdev)) {
3404 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3405 				      MGMT_STATUS_NOT_POWERED);
3406 		goto failed;
3407 	}
3408 
3409 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3410 	if (!conn) {
3411 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3412 				      MGMT_STATUS_NOT_CONNECTED);
3413 		goto failed;
3414 	}
3415 
3416 	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3417 		struct mgmt_cp_pin_code_neg_reply ncp;
3418 
3419 		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3420 
3421 		bt_dev_err(hdev, "PIN code is not 16 bytes long");
3422 
3423 		err = send_pin_code_neg_reply(sk, hdev, &ncp);
3424 		if (err >= 0)
3425 			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3426 					      MGMT_STATUS_INVALID_PARAMS);
3427 
3428 		goto failed;
3429 	}
3430 
3431 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3432 	if (!cmd) {
3433 		err = -ENOMEM;
3434 		goto failed;
3435 	}
3436 
3437 	cmd->cmd_complete = addr_cmd_complete;
3438 
3439 	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3440 	reply.pin_len = cp->pin_len;
3441 	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3442 
3443 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3444 	if (err < 0)
3445 		mgmt_pending_remove(cmd);
3446 
3447 failed:
3448 	hci_dev_unlock(hdev);
3449 	return err;
3450 }
3451 
3452 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3453 			     u16 len)
3454 {
3455 	struct mgmt_cp_set_io_capability *cp = data;
3456 
3457 	bt_dev_dbg(hdev, "sock %p", sk);
3458 
3459 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3460 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3461 				       MGMT_STATUS_INVALID_PARAMS);
3462 
3463 	hci_dev_lock(hdev);
3464 
3465 	hdev->io_capability = cp->io_capability;
3466 
3467 	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3468 
3469 	hci_dev_unlock(hdev);
3470 
3471 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3472 				 NULL, 0);
3473 }
3474 
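/* Look up the pending Pair Device command, if any, that owns this
 * connection (matched via cmd->user_data).
 */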
3475 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3476 {
3477 	struct hci_dev *hdev = conn->hdev;
3478 	struct mgmt_pending_cmd *cmd;
3479 
3480 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3481 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3482 			continue;
3483 
3484 		if (cmd->user_data != conn)
3485 			continue;
3486 
3487 		return cmd;
3488 	}
3489 
3490 	return NULL;
3491 }
3492 
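/* Deliver the final Pair Device response, detach all pairing callbacks
 * from the connection and drop the references taken when pairing
 * started.
 */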
3493 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3494 {
3495 	struct mgmt_rp_pair_device rp;
3496 	struct hci_conn *conn = cmd->user_data;
3497 	int err;
3498 
3499 	bacpy(&rp.addr.bdaddr, &conn->dst);
3500 	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3501 
3502 	err = mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_PAIR_DEVICE,
3503 				status, &rp, sizeof(rp));
3504 
3505 	/* So we don't get further callbacks for this connection */
3506 	conn->connect_cfm_cb = NULL;
3507 	conn->security_cfm_cb = NULL;
3508 	conn->disconn_cfm_cb = NULL;
3509 
3510 	hci_conn_drop(conn);
3511 
3512 	/* The device is paired so there is no need to remove
3513 	 * its connection parameters anymore.
3514 	 */
3515 	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3516 
3517 	hci_conn_put(conn);
3518 
3519 	return err;
3520 }
3521 
3522 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3523 {
3524 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3525 	struct mgmt_pending_cmd *cmd;
3526 
3527 	cmd = find_pairing(conn);
3528 	if (cmd) {
3529 		cmd->cmd_complete(cmd, status);
3530 		mgmt_pending_remove(cmd);
3531 	}
3532 }
3533 
3534 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3535 {
3536 	struct mgmt_pending_cmd *cmd;
3537 
3538 	BT_DBG("status %u", status);
3539 
3540 	cmd = find_pairing(conn);
3541 	if (!cmd) {
3542 		BT_DBG("Unable to find a pending command");
3543 		return;
3544 	}
3545 
3546 	cmd->cmd_complete(cmd, mgmt_status(status));
3547 	mgmt_pending_remove(cmd);
3548 }
3549 
3550 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3551 {
3552 	struct mgmt_pending_cmd *cmd;
3553 
3554 	BT_DBG("status %u", status);
3555 
3556 	if (!status)
3557 		return;
3558 
3559 	cmd = find_pairing(conn);
3560 	if (!cmd) {
3561 		BT_DBG("Unable to find a pending command");
3562 		return;
3563 	}
3564 
3565 	cmd->cmd_complete(cmd, mgmt_status(status));
3566 	mgmt_pending_remove(cmd);
3567 }
3568 
3569 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3570 		       u16 len)
3571 {
3572 	struct mgmt_cp_pair_device *cp = data;
3573 	struct mgmt_rp_pair_device rp;
3574 	struct mgmt_pending_cmd *cmd;
3575 	u8 sec_level, auth_type;
3576 	struct hci_conn *conn;
3577 	int err;
3578 
3579 	bt_dev_dbg(hdev, "sock %p", sk);
3580 
3581 	memset(&rp, 0, sizeof(rp));
3582 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3583 	rp.addr.type = cp->addr.type;
3584 
3585 	if (!bdaddr_type_is_valid(cp->addr.type))
3586 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3587 					 MGMT_STATUS_INVALID_PARAMS,
3588 					 &rp, sizeof(rp));
3589 
3590 	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3591 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3592 					 MGMT_STATUS_INVALID_PARAMS,
3593 					 &rp, sizeof(rp));
3594 
3595 	hci_dev_lock(hdev);
3596 
3597 	if (!hdev_is_powered(hdev)) {
3598 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3599 					MGMT_STATUS_NOT_POWERED, &rp,
3600 					sizeof(rp));
3601 		goto unlock;
3602 	}
3603 
3604 	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3605 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3606 					MGMT_STATUS_ALREADY_PAIRED, &rp,
3607 					sizeof(rp));
3608 		goto unlock;
3609 	}
3610 
3611 	sec_level = BT_SECURITY_MEDIUM;
3612 	auth_type = HCI_AT_DEDICATED_BONDING;
3613 
3614 	if (cp->addr.type == BDADDR_BREDR) {
3615 		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3616 				       auth_type, CONN_REASON_PAIR_DEVICE,
3617 				       HCI_ACL_CONN_TIMEOUT);
3618 	} else {
3619 		u8 addr_type = le_addr_type(cp->addr.type);
3620 		struct hci_conn_params *p;
3621 
3622 		/* When pairing a new device, it is expected that the
3623 		 * device is remembered for future connections. Adding the
3624 		 * connection parameter information ahead of time allows
3625 		 * tracking of the peripheral preferred values and will
3626 		 * speed up any further connection establishment.
3627 		 *
3628 		 * If connection parameters already exist, then they
3629 		 * will be kept and this function does nothing.
3630 		 */
3631 		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3632 		if (!p) {
3633 			err = -EIO;
3634 			goto unlock;
3635 		}
3636 
3637 		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3638 			p->auto_connect = HCI_AUTO_CONN_DISABLED;
3639 
3640 		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3641 					   sec_level, HCI_LE_CONN_TIMEOUT,
3642 					   CONN_REASON_PAIR_DEVICE);
3643 	}
3644 
3645 	if (IS_ERR(conn)) {
3646 		int status;
3647 
3648 		if (PTR_ERR(conn) == -EBUSY)
3649 			status = MGMT_STATUS_BUSY;
3650 		else if (PTR_ERR(conn) == -EOPNOTSUPP)
3651 			status = MGMT_STATUS_NOT_SUPPORTED;
3652 		else if (PTR_ERR(conn) == -ECONNREFUSED)
3653 			status = MGMT_STATUS_REJECTED;
3654 		else
3655 			status = MGMT_STATUS_CONNECT_FAILED;
3656 
3657 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3658 					status, &rp, sizeof(rp));
3659 		goto unlock;
3660 	}
3661 
3662 	if (conn->connect_cfm_cb) {
3663 		hci_conn_drop(conn);
3664 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3665 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
3666 		goto unlock;
3667 	}
3668 
3669 	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3670 	if (!cmd) {
3671 		err = -ENOMEM;
3672 		hci_conn_drop(conn);
3673 		goto unlock;
3674 	}
3675 
3676 	cmd->cmd_complete = pairing_complete;
3677 
3678 	/* For LE, just connecting isn't a proof that the pairing finished */
3679 	if (cp->addr.type == BDADDR_BREDR) {
3680 		conn->connect_cfm_cb = pairing_complete_cb;
3681 		conn->security_cfm_cb = pairing_complete_cb;
3682 		conn->disconn_cfm_cb = pairing_complete_cb;
3683 	} else {
3684 		conn->connect_cfm_cb = le_pairing_complete_cb;
3685 		conn->security_cfm_cb = le_pairing_complete_cb;
3686 		conn->disconn_cfm_cb = le_pairing_complete_cb;
3687 	}
3688 
3689 	conn->io_capability = cp->io_cap;
3690 	cmd->user_data = hci_conn_get(conn);
3691 
3692 	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3693 	    hci_conn_security(conn, sec_level, auth_type, true)) {
3694 		cmd->cmd_complete(cmd, 0);
3695 		mgmt_pending_remove(cmd);
3696 	}
3697 
3698 	err = 0;
3699 
3700 unlock:
3701 	hci_dev_unlock(hdev);
3702 	return err;
3703 }
3704 
3705 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3706 			      u16 len)
3707 {
3708 	struct mgmt_addr_info *addr = data;
3709 	struct mgmt_pending_cmd *cmd;
3710 	struct hci_conn *conn;
3711 	int err;
3712 
3713 	bt_dev_dbg(hdev, "sock %p", sk);
3714 
3715 	hci_dev_lock(hdev);
3716 
3717 	if (!hdev_is_powered(hdev)) {
3718 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3719 				      MGMT_STATUS_NOT_POWERED);
3720 		goto unlock;
3721 	}
3722 
3723 	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3724 	if (!cmd) {
3725 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3726 				      MGMT_STATUS_INVALID_PARAMS);
3727 		goto unlock;
3728 	}
3729 
3730 	conn = cmd->user_data;
3731 
3732 	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3733 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3734 				      MGMT_STATUS_INVALID_PARAMS);
3735 		goto unlock;
3736 	}
3737 
3738 	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3739 	mgmt_pending_remove(cmd);
3740 
3741 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3742 				addr, sizeof(*addr));
3743 
3744 	/* Since the user doesn't want to proceed with the connection, abort any
3745 	 * ongoing pairing and then terminate the link if it was created
3746 	 * because of the pair device action.
3747 	 */
3748 	if (addr->type == BDADDR_BREDR)
3749 		hci_remove_link_key(hdev, &addr->bdaddr);
3750 	else
3751 		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3752 					      le_addr_type(addr->type));
3753 
3754 	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3755 		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3756 
3757 unlock:
3758 	hci_dev_unlock(hdev);
3759 	return err;
3760 }
3761 
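/* Common handler for the PIN/confirm/passkey reply commands: LE
 * responses are routed through SMP, while BR/EDR ones are sent as the
 * corresponding HCI command with a pending entry tracking the reply.
 */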
3762 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3763 			     struct mgmt_addr_info *addr, u16 mgmt_op,
3764 			     u16 hci_op, __le32 passkey)
3765 {
3766 	struct mgmt_pending_cmd *cmd;
3767 	struct hci_conn *conn;
3768 	int err;
3769 
3770 	hci_dev_lock(hdev);
3771 
3772 	if (!hdev_is_powered(hdev)) {
3773 		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3774 					MGMT_STATUS_NOT_POWERED, addr,
3775 					sizeof(*addr));
3776 		goto done;
3777 	}
3778 
3779 	if (addr->type == BDADDR_BREDR)
3780 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3781 	else
3782 		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3783 					       le_addr_type(addr->type));
3784 
3785 	if (!conn) {
3786 		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3787 					MGMT_STATUS_NOT_CONNECTED, addr,
3788 					sizeof(*addr));
3789 		goto done;
3790 	}
3791 
3792 	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3793 		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3794 		if (!err)
3795 			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3796 						MGMT_STATUS_SUCCESS, addr,
3797 						sizeof(*addr));
3798 		else
3799 			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3800 						MGMT_STATUS_FAILED, addr,
3801 						sizeof(*addr));
3802 
3803 		goto done;
3804 	}
3805 
3806 	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3807 	if (!cmd) {
3808 		err = -ENOMEM;
3809 		goto done;
3810 	}
3811 
3812 	cmd->cmd_complete = addr_cmd_complete;
3813 
3814 	/* Continue with pairing via HCI */
3815 	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3816 		struct hci_cp_user_passkey_reply cp;
3817 
3818 		bacpy(&cp.bdaddr, &addr->bdaddr);
3819 		cp.passkey = passkey;
3820 		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3821 	} else
3822 		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3823 				   &addr->bdaddr);
3824 
3825 	if (err < 0)
3826 		mgmt_pending_remove(cmd);
3827 
3828 done:
3829 	hci_dev_unlock(hdev);
3830 	return err;
3831 }
3832 
3833 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3834 			      void *data, u16 len)
3835 {
3836 	struct mgmt_cp_pin_code_neg_reply *cp = data;
3837 
3838 	bt_dev_dbg(hdev, "sock %p", sk);
3839 
3840 	return user_pairing_resp(sk, hdev, &cp->addr,
3841 				MGMT_OP_PIN_CODE_NEG_REPLY,
3842 				HCI_OP_PIN_CODE_NEG_REPLY, 0);
3843 }
3844 
3845 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3846 			      u16 len)
3847 {
3848 	struct mgmt_cp_user_confirm_reply *cp = data;
3849 
3850 	bt_dev_dbg(hdev, "sock %p", sk);
3851 
3852 	if (len != sizeof(*cp))
3853 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3854 				       MGMT_STATUS_INVALID_PARAMS);
3855 
3856 	return user_pairing_resp(sk, hdev, &cp->addr,
3857 				 MGMT_OP_USER_CONFIRM_REPLY,
3858 				 HCI_OP_USER_CONFIRM_REPLY, 0);
3859 }
3860 
3861 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3862 				  void *data, u16 len)
3863 {
3864 	struct mgmt_cp_user_confirm_neg_reply *cp = data;
3865 
3866 	bt_dev_dbg(hdev, "sock %p", sk);
3867 
3868 	return user_pairing_resp(sk, hdev, &cp->addr,
3869 				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3870 				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3871 }
3872 
3873 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3874 			      u16 len)
3875 {
3876 	struct mgmt_cp_user_passkey_reply *cp = data;
3877 
3878 	bt_dev_dbg(hdev, "sock %p", sk);
3879 
3880 	return user_pairing_resp(sk, hdev, &cp->addr,
3881 				 MGMT_OP_USER_PASSKEY_REPLY,
3882 				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3883 }
3884 
3885 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3886 				  void *data, u16 len)
3887 {
3888 	struct mgmt_cp_user_passkey_neg_reply *cp = data;
3889 
3890 	bt_dev_dbg(hdev, "sock %p", sk);
3891 
3892 	return user_pairing_resp(sk, hdev, &cp->addr,
3893 				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3894 				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3895 }
3896 
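/* Expire the current advertising instance if it uses any of the given
 * flags (e.g. the local name or appearance) and schedule the next
 * instance so that updated data gets advertised again.
 */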
3897 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3898 {
3899 	struct adv_info *adv_instance;
3900 
3901 	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3902 	if (!adv_instance)
3903 		return 0;
3904 
3905 	/* stop if current instance doesn't need to be changed */
3906 	if (!(adv_instance->flags & flags))
3907 		return 0;
3908 
3909 	cancel_adv_timeout(hdev);
3910 
3911 	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3912 	if (!adv_instance)
3913 		return 0;
3914 
3915 	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3916 
3917 	return 0;
3918 }
3919 
3920 static int name_changed_sync(struct hci_dev *hdev, void *data)
3921 {
3922 	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3923 }
3924 
3925 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3926 {
3927 	struct mgmt_pending_cmd *cmd = data;
3928 	struct mgmt_cp_set_local_name *cp;
3929 	u8 status = mgmt_status(err);
3930 
3931 	bt_dev_dbg(hdev, "err %d", err);
3932 
3933 	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
3934 		return;
3935 
3936 	cp = cmd->param;
3937 
3938 	if (status) {
3939 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3940 				status);
3941 	} else {
3942 		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3943 				  cp, sizeof(*cp));
3944 
3945 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3946 			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3947 	}
3948 
3949 	mgmt_pending_free(cmd);
3950 }
3951 
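/* Runs from the hci_sync machinery: snapshot the requested name under
 * mgmt_pending_lock (the command may be cancelled concurrently), then
 * update the controller name/EIR for BR/EDR and the scan response data
 * for LE advertising.
 */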
3952 static int set_name_sync(struct hci_dev *hdev, void *data)
3953 {
3954 	struct mgmt_pending_cmd *cmd = data;
3955 	struct mgmt_cp_set_local_name cp;
3956 
3957 	mutex_lock(&hdev->mgmt_pending_lock);
3958 
3959 	if (!__mgmt_pending_listed(hdev, cmd)) {
3960 		mutex_unlock(&hdev->mgmt_pending_lock);
3961 		return -ECANCELED;
3962 	}
3963 
3964 	memcpy(&cp, cmd->param, sizeof(cp));
3965 
3966 	mutex_unlock(&hdev->mgmt_pending_lock);
3967 
3968 	if (lmp_bredr_capable(hdev)) {
3969 		hci_update_name_sync(hdev, cp.name);
3970 		hci_update_eir_sync(hdev);
3971 	}
3972 
3973 	/* The name is stored in the scan response data, so there is
3974 	 * no need to update the advertising data here.
3975 	 */
3976 	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3977 		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3978 
3979 	return 0;
3980 }
3981 
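/* Set Local Name: if the controller is powered off the new name is only
 * stored and the change is signalled directly; otherwise the update is
 * queued as synchronous HCI work via set_name_sync().
 */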
3982 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3983 			  u16 len)
3984 {
3985 	struct mgmt_cp_set_local_name *cp = data;
3986 	struct mgmt_pending_cmd *cmd;
3987 	int err;
3988 
3989 	bt_dev_dbg(hdev, "sock %p", sk);
3990 
3991 	hci_dev_lock(hdev);
3992 
3993 	/* If the old values are the same as the new ones, just return a
3994 	 * direct command complete event.
3995 	 */
3996 	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3997 	    !memcmp(hdev->short_name, cp->short_name,
3998 		    sizeof(hdev->short_name))) {
3999 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
4000 					data, len);
4001 		goto failed;
4002 	}
4003 
4004 	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
4005 
4006 	if (!hdev_is_powered(hdev)) {
4007 		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
4008 
4009 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
4010 					data, len);
4011 		if (err < 0)
4012 			goto failed;
4013 
4014 		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
4015 					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
4016 		ext_info_changed(hdev, sk);
4017 
4018 		goto failed;
4019 	}
4020 
4021 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
4022 	if (!cmd)
4023 		err = -ENOMEM;
4024 	else
4025 		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
4026 					 set_name_complete);
4027 
4028 	if (err < 0) {
4029 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
4030 				      MGMT_STATUS_FAILED);
4031 
4032 		if (cmd)
4033 			mgmt_pending_remove(cmd);
4034 
4035 		goto failed;
4036 	}
4037 
4038 	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
4039 
4040 failed:
4041 	hci_dev_unlock(hdev);
4042 	return err;
4043 }
4044 
4045 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
4046 {
4047 	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
4048 }
4049 
4050 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
4051 			  u16 len)
4052 {
4053 	struct mgmt_cp_set_appearance *cp = data;
4054 	u16 appearance;
4055 	int err;
4056 
4057 	bt_dev_dbg(hdev, "sock %p", sk);
4058 
4059 	if (!lmp_le_capable(hdev))
4060 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
4061 				       MGMT_STATUS_NOT_SUPPORTED);
4062 
4063 	appearance = le16_to_cpu(cp->appearance);
4064 
4065 	hci_dev_lock(hdev);
4066 
4067 	if (hdev->appearance != appearance) {
4068 		hdev->appearance = appearance;
4069 
4070 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
4071 			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
4072 					   NULL);
4073 
4074 		ext_info_changed(hdev, sk);
4075 	}
4076 
4077 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
4078 				0);
4079 
4080 	hci_dev_unlock(hdev);
4081 
4082 	return err;
4083 }
4084 
4085 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4086 				 void *data, u16 len)
4087 {
4088 	struct mgmt_rp_get_phy_configuration rp;
4089 
4090 	bt_dev_dbg(hdev, "sock %p", sk);
4091 
4092 	hci_dev_lock(hdev);
4093 
4094 	memset(&rp, 0, sizeof(rp));
4095 
4096 	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
4097 	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
4098 	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
4099 
4100 	hci_dev_unlock(hdev);
4101 
4102 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
4103 				 &rp, sizeof(rp));
4104 }
4105 
4106 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
4107 {
4108 	struct mgmt_ev_phy_configuration_changed ev;
4109 
4110 	memset(&ev, 0, sizeof(ev));
4111 
4112 	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
4113 
4114 	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
4115 			  sizeof(ev), skip);
4116 }
4117 
4118 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
4119 {
4120 	struct mgmt_pending_cmd *cmd = data;
4121 	struct sk_buff *skb;
4122 	u8 status = mgmt_status(err);
4123 
4124 	skb = cmd->skb;
4125 
4126 	if (!status) {
4127 		if (!skb)
4128 			status = MGMT_STATUS_FAILED;
4129 		else if (IS_ERR(skb))
4130 			status = mgmt_status(PTR_ERR(skb));
4131 		else
4132 			status = mgmt_status(skb->data[0]);
4133 	}
4134 
4135 	bt_dev_dbg(hdev, "status %d", status);
4136 
4137 	if (status) {
4138 		mgmt_cmd_status(cmd->sk, hdev->id,
4139 				MGMT_OP_SET_PHY_CONFIGURATION, status);
4140 	} else {
4141 		mgmt_cmd_complete(cmd->sk, hdev->id,
4142 				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
4143 				  NULL, 0);
4144 
4145 		mgmt_phy_configuration_changed(hdev, cmd->sk);
4146 	}
4147 
4148 	if (skb && !IS_ERR(skb))
4149 		kfree_skb(skb);
4150 
4151 	mgmt_pending_free(cmd);
4152 }
4153 
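/* Translate the MGMT PHY selection into an HCI LE Set Default PHY
 * command. The all_phys bits tell the controller that the host has no
 * preference, so they are set whenever no PHY was selected for the
 * respective (TX/RX) direction.
 */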
4154 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4155 {
4156 	struct mgmt_pending_cmd *cmd = data;
4157 	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4158 	struct hci_cp_le_set_default_phy cp_phy;
4159 	u32 selected_phys;
4160 
4161 	selected_phys = __le32_to_cpu(cp->selected_phys);
4162 
4163 	memset(&cp_phy, 0, sizeof(cp_phy));
4164 
4165 	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4166 		cp_phy.all_phys |= 0x01;
4167 
4168 	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4169 		cp_phy.all_phys |= 0x02;
4170 
4171 	if (selected_phys & MGMT_PHY_LE_1M_TX)
4172 		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4173 
4174 	if (selected_phys & MGMT_PHY_LE_2M_TX)
4175 		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4176 
4177 	if (selected_phys & MGMT_PHY_LE_CODED_TX)
4178 		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4179 
4180 	if (selected_phys & MGMT_PHY_LE_1M_RX)
4181 		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4182 
4183 	if (selected_phys & MGMT_PHY_LE_2M_RX)
4184 		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4185 
4186 	if (selected_phys & MGMT_PHY_LE_CODED_RX)
4187 		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4188 
4189 	cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4190 				   sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4191 
4192 	return 0;
4193 }
4194 
4195 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4196 				 void *data, u16 len)
4197 {
4198 	struct mgmt_cp_set_phy_configuration *cp = data;
4199 	struct mgmt_pending_cmd *cmd;
4200 	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
4201 	u16 pkt_type = (HCI_DH1 | HCI_DM1);
4202 	bool changed = false;
4203 	int err;
4204 
4205 	bt_dev_dbg(hdev, "sock %p", sk);
4206 
4207 	configurable_phys = get_configurable_phys(hdev);
4208 	supported_phys = get_supported_phys(hdev);
4209 	selected_phys = __le32_to_cpu(cp->selected_phys);
4210 
4211 	if (selected_phys & ~supported_phys)
4212 		return mgmt_cmd_status(sk, hdev->id,
4213 				       MGMT_OP_SET_PHY_CONFIGURATION,
4214 				       MGMT_STATUS_INVALID_PARAMS);
4215 
4216 	unconfigure_phys = supported_phys & ~configurable_phys;
4217 
4218 	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4219 		return mgmt_cmd_status(sk, hdev->id,
4220 				       MGMT_OP_SET_PHY_CONFIGURATION,
4221 				       MGMT_STATUS_INVALID_PARAMS);
4222 
4223 	if (selected_phys == get_selected_phys(hdev))
4224 		return mgmt_cmd_complete(sk, hdev->id,
4225 					 MGMT_OP_SET_PHY_CONFIGURATION,
4226 					 0, NULL, 0);
4227 
4228 	hci_dev_lock(hdev);
4229 
4230 	if (!hdev_is_powered(hdev)) {
4231 		err = mgmt_cmd_status(sk, hdev->id,
4232 				      MGMT_OP_SET_PHY_CONFIGURATION,
4233 				      MGMT_STATUS_REJECTED);
4234 		goto unlock;
4235 	}
4236 
4237 	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4238 		err = mgmt_cmd_status(sk, hdev->id,
4239 				      MGMT_OP_SET_PHY_CONFIGURATION,
4240 				      MGMT_STATUS_BUSY);
4241 		goto unlock;
4242 	}
4243 
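	/* Map the BR/EDR PHY selection onto the ACL packet type mask. Note
	 * that the EDR bits (HCI_2DHx/HCI_3DHx) are "shall not be used"
	 * bits, so they are cleared to enable a packet type and set to
	 * disable it, the inverse of the basic rate DM/DH bits.
	 */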
4244 	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4245 		pkt_type |= (HCI_DH3 | HCI_DM3);
4246 	else
4247 		pkt_type &= ~(HCI_DH3 | HCI_DM3);
4248 
4249 	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4250 		pkt_type |= (HCI_DH5 | HCI_DM5);
4251 	else
4252 		pkt_type &= ~(HCI_DH5 | HCI_DM5);
4253 
4254 	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4255 		pkt_type &= ~HCI_2DH1;
4256 	else
4257 		pkt_type |= HCI_2DH1;
4258 
4259 	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4260 		pkt_type &= ~HCI_2DH3;
4261 	else
4262 		pkt_type |= HCI_2DH3;
4263 
4264 	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4265 		pkt_type &= ~HCI_2DH5;
4266 	else
4267 		pkt_type |= HCI_2DH5;
4268 
4269 	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4270 		pkt_type &= ~HCI_3DH1;
4271 	else
4272 		pkt_type |= HCI_3DH1;
4273 
4274 	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4275 		pkt_type &= ~HCI_3DH3;
4276 	else
4277 		pkt_type |= HCI_3DH3;
4278 
4279 	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4280 		pkt_type &= ~HCI_3DH5;
4281 	else
4282 		pkt_type |= HCI_3DH5;
4283 
4284 	if (pkt_type != hdev->pkt_type) {
4285 		hdev->pkt_type = pkt_type;
4286 		changed = true;
4287 	}
4288 
4289 	if ((selected_phys & MGMT_PHY_LE_MASK) ==
4290 	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4291 		if (changed)
4292 			mgmt_phy_configuration_changed(hdev, sk);
4293 
4294 		err = mgmt_cmd_complete(sk, hdev->id,
4295 					MGMT_OP_SET_PHY_CONFIGURATION,
4296 					0, NULL, 0);
4297 
4298 		goto unlock;
4299 	}
4300 
4301 	cmd = mgmt_pending_new(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4302 			       len);
4303 	if (!cmd)
4304 		err = -ENOMEM;
4305 	else
4306 		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4307 					 set_default_phy_complete);
4308 
4309 	if (err < 0) {
4310 		err = mgmt_cmd_status(sk, hdev->id,
4311 				      MGMT_OP_SET_PHY_CONFIGURATION,
4312 				      MGMT_STATUS_FAILED);
4313 
4314 		if (cmd)
4315 			mgmt_pending_remove(cmd);
4316 	}
4317 
4318 unlock:
4319 	hci_dev_unlock(hdev);
4320 
4321 	return err;
4322 }
4323 
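/* Replace the list of blocked keys. Keys on this list are matched
 * against keys being loaded later on so that known-bad values can be
 * rejected.
 */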
4324 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4325 			    u16 len)
4326 {
4327 	int err = MGMT_STATUS_SUCCESS;
4328 	struct mgmt_cp_set_blocked_keys *keys = data;
4329 	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4330 				   sizeof(struct mgmt_blocked_key_info));
4331 	u16 key_count, expected_len;
4332 	int i;
4333 
4334 	bt_dev_dbg(hdev, "sock %p", sk);
4335 
4336 	key_count = __le16_to_cpu(keys->key_count);
4337 	if (key_count > max_key_count) {
4338 		bt_dev_err(hdev, "too big key_count value %u", key_count);
4339 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4340 				       MGMT_STATUS_INVALID_PARAMS);
4341 	}
4342 
4343 	expected_len = struct_size(keys, keys, key_count);
4344 	if (expected_len != len) {
4345 		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4346 			   expected_len, len);
4347 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4348 				       MGMT_STATUS_INVALID_PARAMS);
4349 	}
4350 
4351 	hci_dev_lock(hdev);
4352 
4353 	hci_blocked_keys_clear(hdev);
4354 
4355 	for (i = 0; i < key_count; ++i) {
4356 		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4357 
4358 		if (!b) {
4359 			err = MGMT_STATUS_NO_RESOURCES;
4360 			break;
4361 		}
4362 
4363 		b->type = keys->keys[i].type;
4364 		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4365 		list_add_rcu(&b->list, &hdev->blocked_keys);
4366 	}
4367 	hci_dev_unlock(hdev);
4368 
4369 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4370 				err, NULL, 0);
4371 }
4372 
4373 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4374 			       void *data, u16 len)
4375 {
4376 	struct mgmt_mode *cp = data;
4377 	int err;
4378 	bool changed = false;
4379 
4380 	bt_dev_dbg(hdev, "sock %p", sk);
4381 
4382 	if (!hci_test_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED))
4383 		return mgmt_cmd_status(sk, hdev->id,
4384 				       MGMT_OP_SET_WIDEBAND_SPEECH,
4385 				       MGMT_STATUS_NOT_SUPPORTED);
4386 
4387 	if (cp->val != 0x00 && cp->val != 0x01)
4388 		return mgmt_cmd_status(sk, hdev->id,
4389 				       MGMT_OP_SET_WIDEBAND_SPEECH,
4390 				       MGMT_STATUS_INVALID_PARAMS);
4391 
4392 	hci_dev_lock(hdev);
4393 
4394 	if (hdev_is_powered(hdev) &&
4395 	    !!cp->val != hci_dev_test_flag(hdev,
4396 					   HCI_WIDEBAND_SPEECH_ENABLED)) {
4397 		err = mgmt_cmd_status(sk, hdev->id,
4398 				      MGMT_OP_SET_WIDEBAND_SPEECH,
4399 				      MGMT_STATUS_REJECTED);
4400 		goto unlock;
4401 	}
4402 
4403 	if (cp->val)
4404 		changed = !hci_dev_test_and_set_flag(hdev,
4405 						   HCI_WIDEBAND_SPEECH_ENABLED);
4406 	else
4407 		changed = hci_dev_test_and_clear_flag(hdev,
4408 						   HCI_WIDEBAND_SPEECH_ENABLED);
4409 
4410 	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
4411 	if (err < 0)
4412 		goto unlock;
4413 
4414 	if (changed)
4415 		err = new_settings(hdev, sk);
4416 
4417 unlock:
4418 	hci_dev_unlock(hdev);
4419 	return err;
4420 }
4421 
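/* Build the Read Controller Capabilities response. Each capability is
 * encoded as an EIR-style (length, type, value) triplet appended to
 * rp->cap.
 */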
4422 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4423 			       void *data, u16 data_len)
4424 {
4425 	char buf[20];
4426 	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4427 	u16 cap_len = 0;
4428 	u8 flags = 0;
4429 	u8 tx_power_range[2];
4430 
4431 	bt_dev_dbg(hdev, "sock %p", sk);
4432 
4433 	memset(&buf, 0, sizeof(buf));
4434 
4435 	hci_dev_lock(hdev);
4436 
4437 	/* When the Read Simple Pairing Options command is supported,
4438 	 * remote public key validation is supported.
4439 	 *
4440 	 * Alternatively, when Microsoft extensions are available, they can
4441 	 * indicate support for public key validation as well.
4442 	 */
4443 	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4444 		flags |= 0x01;	/* Remote public key validation (BR/EDR) */
4445 
4446 	flags |= 0x02;		/* Remote public key validation (LE) */
4447 
4448 	/* When the Read Encryption Key Size command is supported, the
4449 	 * encryption key size is enforced.
4450 	 */
4451 	if (hdev->commands[20] & 0x10)
4452 		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */
4453 
4454 	flags |= 0x08;		/* Encryption key size enforcement (LE) */
4455 
4456 	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4457 				  &flags, 1);
4458 
4459 	/* When the Read Simple Pairing Options command is supported, the
4460 	 * maximum encryption key size information is also provided.
4461 	 */
4462 	if (hdev->commands[41] & 0x08)
4463 		cap_len = eir_append_le16(rp->cap, cap_len,
4464 					  MGMT_CAP_MAX_ENC_KEY_SIZE,
4465 					  hdev->max_enc_key_size);
4466 
4467 	cap_len = eir_append_le16(rp->cap, cap_len,
4468 				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4469 				  SMP_MAX_ENC_KEY_SIZE);
4470 
4471 	/* Append the min/max LE TX power parameters if we were able to fetch
4472 	 * them from the controller.
4473 	 */
4474 	if (hdev->commands[38] & 0x80) {
4475 		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4476 		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4477 		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4478 					  tx_power_range, 2);
4479 	}
4480 
4481 	rp->cap_len = cpu_to_le16(cap_len);
4482 
4483 	hci_dev_unlock(hdev);
4484 
4485 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4486 				 rp, sizeof(*rp) + cap_len);
4487 }
4488 
4489 #ifdef CONFIG_BT_FEATURE_DEBUG
4490 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4491 static const u8 debug_uuid[16] = {
4492 	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4493 	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4494 };
4495 #endif
4496 
4497 /* 330859bc-7506-492d-9370-9a6f0614037f */
4498 static const u8 quality_report_uuid[16] = {
4499 	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4500 	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4501 };
4502 
4503 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4504 static const u8 offload_codecs_uuid[16] = {
4505 	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4506 	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4507 };
4508 
4509 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4510 static const u8 le_simultaneous_roles_uuid[16] = {
4511 	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4512 	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4513 };
4514 
4515 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4516 static const u8 iso_socket_uuid[16] = {
4517 	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4518 	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4519 };
4520 
4521 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4522 static const u8 mgmt_mesh_uuid[16] = {
4523 	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4524 	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
4525 };
4526 
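/* Report the experimental, UUID-keyed features and whether each one is
 * currently enabled (BIT(0) of the flags). This command is also valid
 * without a controller index, in which case hdev is NULL.
 */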
4527 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4528 				  void *data, u16 data_len)
4529 {
4530 	struct mgmt_rp_read_exp_features_info *rp;
4531 	size_t len;
4532 	u16 idx = 0;
4533 	u32 flags;
4534 	int status;
4535 
4536 	bt_dev_dbg(hdev, "sock %p", sk);
4537 
4538 	/* Enough space for 7 features */
4539 	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4540 	rp = kzalloc(len, GFP_KERNEL);
4541 	if (!rp)
4542 		return -ENOMEM;
4543 
4544 #ifdef CONFIG_BT_FEATURE_DEBUG
4545 	flags = bt_dbg_get() ? BIT(0) : 0;
4546 
4547 	memcpy(rp->features[idx].uuid, debug_uuid, 16);
4548 	rp->features[idx].flags = cpu_to_le32(flags);
4549 	idx++;
4550 #endif
4551 
4552 	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4553 		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4554 			flags = BIT(0);
4555 		else
4556 			flags = 0;
4557 
4558 		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4559 		rp->features[idx].flags = cpu_to_le32(flags);
4560 		idx++;
4561 	}
4562 
4563 	if (hdev && (aosp_has_quality_report(hdev) ||
4564 		     hdev->set_quality_report)) {
4565 		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4566 			flags = BIT(0);
4567 		else
4568 			flags = 0;
4569 
4570 		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4571 		rp->features[idx].flags = cpu_to_le32(flags);
4572 		idx++;
4573 	}
4574 
4575 	if (hdev && hdev->get_data_path_id) {
4576 		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4577 			flags = BIT(0);
4578 		else
4579 			flags = 0;
4580 
4581 		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4582 		rp->features[idx].flags = cpu_to_le32(flags);
4583 		idx++;
4584 	}
4585 
4586 	if (IS_ENABLED(CONFIG_BT_LE)) {
4587 		flags = iso_inited() ? BIT(0) : 0;
4588 		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4589 		rp->features[idx].flags = cpu_to_le32(flags);
4590 		idx++;
4591 	}
4592 
4593 	if (hdev && lmp_le_capable(hdev)) {
4594 		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4595 			flags = BIT(0);
4596 		else
4597 			flags = 0;
4598 
4599 		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4600 		rp->features[idx].flags = cpu_to_le32(flags);
4601 		idx++;
4602 	}
4603 
4604 	rp->feature_count = cpu_to_le16(idx);
4605 
4606 	/* After reading the experimental features information, enable
4607 	 * the events to update the client on any future change.
4608 	 */
4609 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4610 
4611 	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4612 				   MGMT_OP_READ_EXP_FEATURES_INFO,
4613 				   0, rp, sizeof(*rp) + (20 * idx));
4614 
4615 	kfree(rp);
4616 	return status;
4617 }
4618 
4619 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4620 			       bool enabled, struct sock *skip)
4621 {
4622 	struct mgmt_ev_exp_feature_changed ev;
4623 
4624 	memset(&ev, 0, sizeof(ev));
4625 	memcpy(ev.uuid, uuid, 16);
4626 	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4627 
4628 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4629 				  &ev, sizeof(ev),
4630 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4631 }
4632 
4633 #define EXP_FEAT(_uuid, _set_func)	\
4634 {					\
4635 	.uuid = _uuid,			\
4636 	.set_func = _set_func,		\
4637 }
4638 
4639 /* The zero key uuid is special. Multiple exp features are set through it. */
4640 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4641 			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4642 {
4643 	struct mgmt_rp_set_exp_feature rp;
4644 
4645 	memset(rp.uuid, 0, 16);
4646 	rp.flags = cpu_to_le32(0);
4647 
4648 #ifdef CONFIG_BT_FEATURE_DEBUG
4649 	if (!hdev) {
4650 		bool changed = bt_dbg_get();
4651 
4652 		bt_dbg_set(false);
4653 
4654 		if (changed)
4655 			exp_feature_changed(NULL, ZERO_KEY, false, sk);
4656 	}
4657 #endif
4658 
4659 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4660 
4661 	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4662 				 MGMT_OP_SET_EXP_FEATURE, 0,
4663 				 &rp, sizeof(rp));
4664 }
4665 
4666 #ifdef CONFIG_BT_FEATURE_DEBUG
4667 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4668 			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4669 {
4670 	struct mgmt_rp_set_exp_feature rp;
4671 
4672 	bool val, changed;
4673 	int err;
4674 
4675 	/* This command requires the non-controller index */
4676 	if (hdev)
4677 		return mgmt_cmd_status(sk, hdev->id,
4678 				       MGMT_OP_SET_EXP_FEATURE,
4679 				       MGMT_STATUS_INVALID_INDEX);
4680 
4681 	/* Parameters are limited to a single octet */
4682 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4683 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4684 				       MGMT_OP_SET_EXP_FEATURE,
4685 				       MGMT_STATUS_INVALID_PARAMS);
4686 
4687 	/* Only boolean on/off is supported */
4688 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4689 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4690 				       MGMT_OP_SET_EXP_FEATURE,
4691 				       MGMT_STATUS_INVALID_PARAMS);
4692 
4693 	val = !!cp->param[0];
4694 	changed = val ? !bt_dbg_get() : bt_dbg_get();
4695 	bt_dbg_set(val);
4696 
4697 	memcpy(rp.uuid, debug_uuid, 16);
4698 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4699 
4700 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4701 
4702 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4703 				MGMT_OP_SET_EXP_FEATURE, 0,
4704 				&rp, sizeof(rp));
4705 
4706 	if (changed)
4707 		exp_feature_changed(hdev, debug_uuid, val, sk);
4708 
4709 	return err;
4710 }
4711 #endif
4712 
4713 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4714 			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4715 {
4716 	struct mgmt_rp_set_exp_feature rp;
4717 	bool val, changed;
4718 	int err;
4719 
4720 	/* This command requires a controller index */
4721 	if (!hdev)
4722 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4723 				       MGMT_OP_SET_EXP_FEATURE,
4724 				       MGMT_STATUS_INVALID_INDEX);
4725 
4726 	/* Parameters are limited to a single octet */
4727 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4728 		return mgmt_cmd_status(sk, hdev->id,
4729 				       MGMT_OP_SET_EXP_FEATURE,
4730 				       MGMT_STATUS_INVALID_PARAMS);
4731 
4732 	/* Only boolean on/off is supported */
4733 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4734 		return mgmt_cmd_status(sk, hdev->id,
4735 				       MGMT_OP_SET_EXP_FEATURE,
4736 				       MGMT_STATUS_INVALID_PARAMS);
4737 
4738 	val = !!cp->param[0];
4739 
4740 	if (val) {
4741 		changed = !hci_dev_test_and_set_flag(hdev,
4742 						     HCI_MESH_EXPERIMENTAL);
4743 	} else {
4744 		hci_dev_clear_flag(hdev, HCI_MESH);
4745 		changed = hci_dev_test_and_clear_flag(hdev,
4746 						      HCI_MESH_EXPERIMENTAL);
4747 	}
4748 
4749 	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4750 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4751 
4752 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4753 
4754 	err = mgmt_cmd_complete(sk, hdev->id,
4755 				MGMT_OP_SET_EXP_FEATURE, 0,
4756 				&rp, sizeof(rp));
4757 
4758 	if (changed)
4759 		exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4760 
4761 	return err;
4762 }
4763 
4764 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4765 				   struct mgmt_cp_set_exp_feature *cp,
4766 				   u16 data_len)
4767 {
4768 	struct mgmt_rp_set_exp_feature rp;
4769 	bool val, changed;
4770 	int err;
4771 
4772 	/* This command requires a valid controller index */
4773 	if (!hdev)
4774 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4775 				       MGMT_OP_SET_EXP_FEATURE,
4776 				       MGMT_STATUS_INVALID_INDEX);
4777 
4778 	/* Parameters are limited to a single octet */
4779 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4780 		return mgmt_cmd_status(sk, hdev->id,
4781 				       MGMT_OP_SET_EXP_FEATURE,
4782 				       MGMT_STATUS_INVALID_PARAMS);
4783 
4784 	/* Only boolean on/off is supported */
4785 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4786 		return mgmt_cmd_status(sk, hdev->id,
4787 				       MGMT_OP_SET_EXP_FEATURE,
4788 				       MGMT_STATUS_INVALID_PARAMS);
4789 
4790 	hci_req_sync_lock(hdev);
4791 
4792 	val = !!cp->param[0];
4793 	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4794 
4795 	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4796 		err = mgmt_cmd_status(sk, hdev->id,
4797 				      MGMT_OP_SET_EXP_FEATURE,
4798 				      MGMT_STATUS_NOT_SUPPORTED);
4799 		goto unlock_quality_report;
4800 	}
4801 
4802 	if (changed) {
4803 		if (hdev->set_quality_report)
4804 			err = hdev->set_quality_report(hdev, val);
4805 		else
4806 			err = aosp_set_quality_report(hdev, val);
4807 
4808 		if (err) {
4809 			err = mgmt_cmd_status(sk, hdev->id,
4810 					      MGMT_OP_SET_EXP_FEATURE,
4811 					      MGMT_STATUS_FAILED);
4812 			goto unlock_quality_report;
4813 		}
4814 
4815 		if (val)
4816 			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4817 		else
4818 			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4819 	}
4820 
4821 	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4822 
4823 	memcpy(rp.uuid, quality_report_uuid, 16);
4824 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4825 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4826 
4827 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4828 				&rp, sizeof(rp));
4829 
4830 	if (changed)
4831 		exp_feature_changed(hdev, quality_report_uuid, val, sk);
4832 
4833 unlock_quality_report:
4834 	hci_req_sync_unlock(hdev);
4835 	return err;
4836 }
4837 
4838 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4839 				  struct mgmt_cp_set_exp_feature *cp,
4840 				  u16 data_len)
4841 {
4842 	bool val, changed;
4843 	int err;
4844 	struct mgmt_rp_set_exp_feature rp;
4845 
4846 	/* This command requires a valid controller index */
4847 	if (!hdev)
4848 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4849 				       MGMT_OP_SET_EXP_FEATURE,
4850 				       MGMT_STATUS_INVALID_INDEX);
4851 
4852 	/* Parameters are limited to a single octet */
4853 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4854 		return mgmt_cmd_status(sk, hdev->id,
4855 				       MGMT_OP_SET_EXP_FEATURE,
4856 				       MGMT_STATUS_INVALID_PARAMS);
4857 
4858 	/* Only boolean on/off is supported */
4859 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4860 		return mgmt_cmd_status(sk, hdev->id,
4861 				       MGMT_OP_SET_EXP_FEATURE,
4862 				       MGMT_STATUS_INVALID_PARAMS);
4863 
4864 	val = !!cp->param[0];
4865 	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4866 
4867 	if (!hdev->get_data_path_id) {
4868 		return mgmt_cmd_status(sk, hdev->id,
4869 				       MGMT_OP_SET_EXP_FEATURE,
4870 				       MGMT_STATUS_NOT_SUPPORTED);
4871 	}
4872 
4873 	if (changed) {
4874 		if (val)
4875 			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4876 		else
4877 			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4878 	}
4879 
4880 	bt_dev_info(hdev, "offload codecs enable %d changed %d",
4881 		    val, changed);
4882 
4883 	memcpy(rp.uuid, offload_codecs_uuid, 16);
4884 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4885 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4886 	err = mgmt_cmd_complete(sk, hdev->id,
4887 				MGMT_OP_SET_EXP_FEATURE, 0,
4888 				&rp, sizeof(rp));
4889 
4890 	if (changed)
4891 		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4892 
4893 	return err;
4894 }
4895 
4896 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4897 					  struct mgmt_cp_set_exp_feature *cp,
4898 					  u16 data_len)
4899 {
4900 	bool val, changed;
4901 	int err;
4902 	struct mgmt_rp_set_exp_feature rp;
4903 
4904 	/* This command requires a valid controller index */
4905 	if (!hdev)
4906 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4907 				       MGMT_OP_SET_EXP_FEATURE,
4908 				       MGMT_STATUS_INVALID_INDEX);
4909 
4910 	/* Parameters are limited to a single octet */
4911 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4912 		return mgmt_cmd_status(sk, hdev->id,
4913 				       MGMT_OP_SET_EXP_FEATURE,
4914 				       MGMT_STATUS_INVALID_PARAMS);
4915 
4916 	/* Only boolean on/off is supported */
4917 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4918 		return mgmt_cmd_status(sk, hdev->id,
4919 				       MGMT_OP_SET_EXP_FEATURE,
4920 				       MGMT_STATUS_INVALID_PARAMS);
4921 
4922 	val = !!cp->param[0];
4923 	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4924 
4925 	if (!hci_dev_le_state_simultaneous(hdev)) {
4926 		return mgmt_cmd_status(sk, hdev->id,
4927 				       MGMT_OP_SET_EXP_FEATURE,
4928 				       MGMT_STATUS_NOT_SUPPORTED);
4929 	}
4930 
4931 	if (changed) {
4932 		if (val)
4933 			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4934 		else
4935 			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4936 	}
4937 
4938 	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4939 		    val, changed);
4940 
4941 	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4942 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4943 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4944 	err = mgmt_cmd_complete(sk, hdev->id,
4945 				MGMT_OP_SET_EXP_FEATURE, 0,
4946 				&rp, sizeof(rp));
4947 
4948 	if (changed)
4949 		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4950 
4951 	return err;
4952 }
4953 
4954 #ifdef CONFIG_BT_LE
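/* Toggling the ISO socket feature (un)registers the ISO socket protocol
 * globally, which is why this command uses the non-controller index.
 */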
4955 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4956 			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4957 {
4958 	struct mgmt_rp_set_exp_feature rp;
4959 	bool val, changed = false;
4960 	int err;
4961 
4962 	/* This command requires the non-controller index */
4963 	if (hdev)
4964 		return mgmt_cmd_status(sk, hdev->id,
4965 				       MGMT_OP_SET_EXP_FEATURE,
4966 				       MGMT_STATUS_INVALID_INDEX);
4967 
4968 	/* Parameters are limited to a single octet */
4969 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4970 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4971 				       MGMT_OP_SET_EXP_FEATURE,
4972 				       MGMT_STATUS_INVALID_PARAMS);
4973 
4974 	/* Only boolean on/off is supported */
4975 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4976 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4977 				       MGMT_OP_SET_EXP_FEATURE,
4978 				       MGMT_STATUS_INVALID_PARAMS);
4979 
4980 	val = !!cp->param[0];
4981 	if (val)
4982 		err = iso_init();
4983 	else
4984 		err = iso_exit();
4985 
4986 	if (!err)
4987 		changed = true;
4988 
4989 	memcpy(rp.uuid, iso_socket_uuid, 16);
4990 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4991 
4992 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4993 
4994 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4995 				MGMT_OP_SET_EXP_FEATURE, 0,
4996 				&rp, sizeof(rp));
4997 
4998 	if (changed)
4999 		exp_feature_changed(hdev, iso_socket_uuid, val, sk);
5000 
5001 	return err;
5002 }
5003 #endif
5004 
5005 static const struct mgmt_exp_feature {
5006 	const u8 *uuid;
5007 	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
5008 			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
5009 } exp_features[] = {
5010 	EXP_FEAT(ZERO_KEY, set_zero_key_func),
5011 #ifdef CONFIG_BT_FEATURE_DEBUG
5012 	EXP_FEAT(debug_uuid, set_debug_func),
5013 #endif
5014 	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
5015 	EXP_FEAT(quality_report_uuid, set_quality_report_func),
5016 	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
5017 	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
5018 #ifdef CONFIG_BT_LE
5019 	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
5020 #endif
5021 
5022 	/* end with a null feature */
5023 	EXP_FEAT(NULL, NULL)
5024 };
5025 
5026 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
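/* Dispatch Set Experimental Feature to the handler whose UUID matches
 * the one in the request.
 */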
5027 			   void *data, u16 data_len)
5028 {
5029 	struct mgmt_cp_set_exp_feature *cp = data;
5030 	size_t i = 0;
5031 
5032 	bt_dev_dbg(hdev, "sock %p", sk);
5033 
5034 	for (i = 0; exp_features[i].uuid; i++) {
5035 		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
5036 			return exp_features[i].set_func(sk, hdev, cp, data_len);
5037 	}
5038 
5039 	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
5040 			       MGMT_OP_SET_EXP_FEATURE,
5041 			       MGMT_STATUS_NOT_SUPPORTED);
5042 }
5043 
5044 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5045 			    u16 data_len)
5046 {
5047 	struct mgmt_cp_get_device_flags *cp = data;
5048 	struct mgmt_rp_get_device_flags rp;
5049 	struct bdaddr_list_with_flags *br_params;
5050 	struct hci_conn_params *params;
5051 	u32 supported_flags;
5052 	u32 current_flags = 0;
5053 	u8 status = MGMT_STATUS_INVALID_PARAMS;
5054 
5055 	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
5056 		   &cp->addr.bdaddr, cp->addr.type);
5057 
5058 	hci_dev_lock(hdev);
5059 
5060 	supported_flags = hdev->conn_flags;
5061 
5062 	memset(&rp, 0, sizeof(rp));
5063 
5064 	if (cp->addr.type == BDADDR_BREDR) {
5065 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5066 							      &cp->addr.bdaddr,
5067 							      cp->addr.type);
5068 		if (!br_params)
5069 			goto done;
5070 
5071 		current_flags = br_params->flags;
5072 	} else {
5073 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5074 						le_addr_type(cp->addr.type));
5075 		if (!params)
5076 			goto done;
5077 
5078 		current_flags = params->flags;
5079 	}
5080 
5081 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5082 	rp.addr.type = cp->addr.type;
5083 	rp.supported_flags = cpu_to_le32(supported_flags);
5084 	rp.current_flags = cpu_to_le32(current_flags);
5085 
5086 	status = MGMT_STATUS_SUCCESS;
5087 
5088 done:
5089 	hci_dev_unlock(hdev);
5090 
5091 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5092 				&rp, sizeof(rp));
5093 }
5094 
5095 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5096 				 bdaddr_t *bdaddr, u8 bdaddr_type,
5097 				 u32 supported_flags, u32 current_flags)
5098 {
5099 	struct mgmt_ev_device_flags_changed ev;
5100 
5101 	bacpy(&ev.addr.bdaddr, bdaddr);
5102 	ev.addr.type = bdaddr_type;
5103 	ev.supported_flags = cpu_to_le32(supported_flags);
5104 	ev.current_flags = cpu_to_le32(current_flags);
5105 
5106 	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5107 }
5108 
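/* Set the per-device flags. BR/EDR devices keep their flags in the
 * accept list entry, while LE devices keep them in the connection
 * parameters, so the lookup differs by address type.
 */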
5109 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5110 			    u16 len)
5111 {
5112 	struct mgmt_cp_set_device_flags *cp = data;
5113 	struct bdaddr_list_with_flags *br_params;
5114 	struct hci_conn_params *params;
5115 	u8 status = MGMT_STATUS_INVALID_PARAMS;
5116 	u32 supported_flags;
5117 	u32 current_flags = __le32_to_cpu(cp->current_flags);
5118 
5119 	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5120 		   &cp->addr.bdaddr, cp->addr.type, current_flags);
5121 
5122 	/* conn_flags can change, so it is read again under hci_dev_lock() below */
5123 	supported_flags = hdev->conn_flags;
5124 
5125 	if ((supported_flags | current_flags) != supported_flags) {
5126 		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5127 			    current_flags, supported_flags);
5128 		goto done;
5129 	}
5130 
5131 	hci_dev_lock(hdev);
5132 
5133 	if (cp->addr.type == BDADDR_BREDR) {
5134 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5135 							      &cp->addr.bdaddr,
5136 							      cp->addr.type);
5137 
5138 		if (br_params) {
5139 			br_params->flags = current_flags;
5140 			status = MGMT_STATUS_SUCCESS;
5141 		} else {
5142 			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5143 				    &cp->addr.bdaddr, cp->addr.type);
5144 		}
5145 
5146 		goto unlock;
5147 	}
5148 
5149 	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5150 					le_addr_type(cp->addr.type));
5151 	if (!params) {
5152 		bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5153 			    &cp->addr.bdaddr, le_addr_type(cp->addr.type));
5154 		goto unlock;
5155 	}
5156 
5157 	supported_flags = hdev->conn_flags;
5158 
5159 	if ((supported_flags | current_flags) != supported_flags) {
5160 		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5161 			    current_flags, supported_flags);
5162 		goto unlock;
5163 	}
5164 
5165 	WRITE_ONCE(params->flags, current_flags);
5166 	status = MGMT_STATUS_SUCCESS;
5167 
5168 	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5169 	 * has been set.
5170 	 */
5171 	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5172 		hci_update_passive_scan(hdev);
5173 
5174 unlock:
5175 	hci_dev_unlock(hdev);
5176 
5177 done:
5178 	if (status == MGMT_STATUS_SUCCESS)
5179 		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5180 				     supported_flags, current_flags);
5181 
5182 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5183 				 &cp->addr, sizeof(cp->addr));
5184 }
5185 
5186 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5187 				   u16 handle)
5188 {
5189 	struct mgmt_ev_adv_monitor_added ev;
5190 
5191 	ev.monitor_handle = cpu_to_le16(handle);
5192 
5193 	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5194 }
5195 
5196 static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
5197 				     __le16 handle)
5198 {
5199 	struct mgmt_ev_adv_monitor_removed ev;
5200 
5201 	ev.monitor_handle = handle;
5202 
5203 	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
5204 }
5205 
5206 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5207 				 void *data, u16 len)
5208 {
5209 	struct adv_monitor *monitor = NULL;
5210 	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5211 	int handle, err;
5212 	size_t rp_size = 0;
5213 	__u32 supported = 0;
5214 	__u32 enabled = 0;
5215 	__u16 num_handles = 0;
5216 	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5217 
5218 	BT_DBG("request for %s", hdev->name);
5219 
5220 	hci_dev_lock(hdev);
5221 
5222 	if (msft_monitor_supported(hdev))
5223 		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5224 
5225 	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5226 		handles[num_handles++] = monitor->handle;
5227 
5228 	hci_dev_unlock(hdev);
5229 
5230 	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5231 	rp = kmalloc(rp_size, GFP_KERNEL);
5232 	if (!rp)
5233 		return -ENOMEM;
5234 
5235 	/* All supported features are currently enabled */
5236 	enabled = supported;
5237 
5238 	rp->supported_features = cpu_to_le32(supported);
5239 	rp->enabled_features = cpu_to_le32(enabled);
5240 	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5241 	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5242 	rp->num_handles = cpu_to_le16(num_handles);
5243 	if (num_handles)
5244 		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5245 
5246 	err = mgmt_cmd_complete(sk, hdev->id,
5247 				MGMT_OP_READ_ADV_MONITOR_FEATURES,
5248 				MGMT_STATUS_SUCCESS, rp, rp_size);
5249 
5250 	kfree(rp);
5251 
5252 	return err;
5253 }
5254 
5255 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5256 						   void *data, int status)
5257 {
5258 	struct mgmt_rp_add_adv_patterns_monitor rp;
5259 	struct mgmt_pending_cmd *cmd = data;
5260 	struct adv_monitor *monitor;
5261 
5262 	/* This is likely the result of hdev being closed: mgmt_index_removed
5263 	 * is attempting to clean up any pending command, so
5264 	 * hci_adv_monitors_clear is about to be called and will take care of
5265 	 * freeing the adv_monitor instances.
5266 	 */
5267 	if (status == -ECANCELED && !mgmt_pending_valid(hdev, cmd))
5268 		return;
5269 
5270 	monitor = cmd->user_data;
5271 
5272 	hci_dev_lock(hdev);
5273 
5274 	rp.monitor_handle = cpu_to_le16(monitor->handle);
5275 
5276 	if (!status) {
5277 		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5278 		hdev->adv_monitors_cnt++;
5279 		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5280 			monitor->state = ADV_MONITOR_STATE_REGISTERED;
5281 		hci_update_passive_scan(hdev);
5282 	}
5283 
5284 	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
5285 			  mgmt_status(status), &rp, sizeof(rp));
5286 	mgmt_pending_remove(cmd);
5287 
5288 	hci_dev_unlock(hdev);
5289 	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5290 		   rp.monitor_handle, status);
5291 }
5292 
5293 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5294 {
5295 	struct mgmt_pending_cmd *cmd = data;
5296 	struct adv_monitor *mon;
5297 
5298 	mutex_lock(&hdev->mgmt_pending_lock);
5299 
5300 	if (!__mgmt_pending_listed(hdev, cmd)) {
5301 		mutex_unlock(&hdev->mgmt_pending_lock);
5302 		return -ECANCELED;
5303 	}
5304 
5305 	mon = cmd->user_data;
5306 
5307 	mutex_unlock(&hdev->mgmt_pending_lock);
5308 
5309 	return hci_add_adv_monitor(hdev, mon);
5310 }
5311 
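/* Common bottom half of the Add Advertising Patterns Monitor commands:
 * takes ownership of the monitor and either queues the registration as
 * synchronous HCI work or frees the monitor and returns the given
 * status.
 */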
5312 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5313 				      struct adv_monitor *m, u8 status,
5314 				      void *data, u16 len, u16 op)
5315 {
5316 	struct mgmt_pending_cmd *cmd;
5317 	int err;
5318 
5319 	hci_dev_lock(hdev);
5320 
5321 	if (status)
5322 		goto unlock;
5323 
5324 	if (pending_find(MGMT_OP_SET_LE, hdev) ||
5325 	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5326 	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5327 		status = MGMT_STATUS_BUSY;
5328 		goto unlock;
5329 	}
5330 
5331 	cmd = mgmt_pending_add(sk, op, hdev, data, len);
5332 	if (!cmd) {
5333 		status = MGMT_STATUS_NO_RESOURCES;
5334 		goto unlock;
5335 	}
5336 
5337 	cmd->user_data = m;
5338 	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5339 				 mgmt_add_adv_patterns_monitor_complete);
5340 	if (err) {
5341 		if (err == -ENOMEM)
5342 			status = MGMT_STATUS_NO_RESOURCES;
5343 		else
5344 			status = MGMT_STATUS_FAILED;
5345 
5346 		goto unlock;
5347 	}
5348 
5349 	hci_dev_unlock(hdev);
5350 
5351 	return 0;
5352 
5353 unlock:
5354 	hci_free_adv_monitor(hdev, m);
5355 	hci_dev_unlock(hdev);
5356 	return mgmt_cmd_status(sk, hdev->id, op, status);
5357 }
5358 
5359 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5360 				   struct mgmt_adv_rssi_thresholds *rssi)
5361 {
5362 	if (rssi) {
5363 		m->rssi.low_threshold = rssi->low_threshold;
5364 		m->rssi.low_threshold_timeout =
5365 		    __le16_to_cpu(rssi->low_threshold_timeout);
5366 		m->rssi.high_threshold = rssi->high_threshold;
5367 		m->rssi.high_threshold_timeout =
5368 		    __le16_to_cpu(rssi->high_threshold_timeout);
5369 		m->rssi.sampling_period = rssi->sampling_period;
5370 	} else {
5371 		/* Default values. These are the least constraining
5372 		 * parameters for the MSFT API to work, so it behaves as if
5373 		 * there were no RSSI parameters to consider. They may need
5374 		 * to be changed if other APIs are to be supported.
5375 		 */
5376 		m->rssi.low_threshold = -127;
5377 		m->rssi.low_threshold_timeout = 60;
5378 		m->rssi.high_threshold = -127;
5379 		m->rssi.high_threshold_timeout = 0;
5380 		m->rssi.sampling_period = 0;
5381 	}
5382 }
5383 
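/* Copy the advertising patterns into the monitor, validating that each
 * pattern fits within the maximum extended advertising data length.
 */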
5384 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5385 				    struct mgmt_adv_pattern *patterns)
5386 {
5387 	u8 offset = 0, length = 0;
5388 	struct adv_pattern *p = NULL;
5389 	int i;
5390 
5391 	for (i = 0; i < pattern_count; i++) {
5392 		offset = patterns[i].offset;
5393 		length = patterns[i].length;
5394 		if (offset >= HCI_MAX_EXT_AD_LENGTH ||
5395 		    length > HCI_MAX_EXT_AD_LENGTH ||
5396 		    (offset + length) > HCI_MAX_EXT_AD_LENGTH)
5397 			return MGMT_STATUS_INVALID_PARAMS;
5398 
5399 		p = kmalloc(sizeof(*p), GFP_KERNEL);
5400 		if (!p)
5401 			return MGMT_STATUS_NO_RESOURCES;
5402 
5403 		p->ad_type = patterns[i].ad_type;
5404 		p->offset = patterns[i].offset;
5405 		p->length = patterns[i].length;
5406 		memcpy(p->value, patterns[i].value, p->length);
5407 
5408 		INIT_LIST_HEAD(&p->list);
5409 		list_add(&p->list, &m->patterns);
5410 	}
5411 
5412 	return MGMT_STATUS_SUCCESS;
5413 }
5414 
5415 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5416 				    void *data, u16 len)
5417 {
5418 	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5419 	struct adv_monitor *m = NULL;
5420 	u8 status = MGMT_STATUS_SUCCESS;
5421 	size_t expected_size = sizeof(*cp);
5422 
5423 	BT_DBG("request for %s", hdev->name);
5424 
5425 	if (len <= sizeof(*cp)) {
5426 		status = MGMT_STATUS_INVALID_PARAMS;
5427 		goto done;
5428 	}
5429 
5430 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5431 	if (len != expected_size) {
5432 		status = MGMT_STATUS_INVALID_PARAMS;
5433 		goto done;
5434 	}
5435 
5436 	m = kzalloc(sizeof(*m), GFP_KERNEL);
5437 	if (!m) {
5438 		status = MGMT_STATUS_NO_RESOURCES;
5439 		goto done;
5440 	}
5441 
5442 	INIT_LIST_HEAD(&m->patterns);
5443 
5444 	parse_adv_monitor_rssi(m, NULL);
5445 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5446 
5447 done:
5448 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5449 					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5450 }
5451 
5452 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5453 					 void *data, u16 len)
5454 {
5455 	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5456 	struct adv_monitor *m = NULL;
5457 	u8 status = MGMT_STATUS_SUCCESS;
5458 	size_t expected_size = sizeof(*cp);
5459 
5460 	BT_DBG("request for %s", hdev->name);
5461 
5462 	if (len <= sizeof(*cp)) {
5463 		status = MGMT_STATUS_INVALID_PARAMS;
5464 		goto done;
5465 	}
5466 
5467 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5468 	if (len != expected_size) {
5469 		status = MGMT_STATUS_INVALID_PARAMS;
5470 		goto done;
5471 	}
5472 
5473 	m = kzalloc(sizeof(*m), GFP_KERNEL);
5474 	if (!m) {
5475 		status = MGMT_STATUS_NO_RESOURCES;
5476 		goto done;
5477 	}
5478 
5479 	INIT_LIST_HEAD(&m->patterns);
5480 
5481 	parse_adv_monitor_rssi(m, &cp->rssi);
5482 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5483 
5484 done:
5485 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5486 					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5487 }
5488 
5489 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5490 					     void *data, int status)
5491 {
5492 	struct mgmt_rp_remove_adv_monitor rp;
5493 	struct mgmt_pending_cmd *cmd = data;
5494 	struct mgmt_cp_remove_adv_monitor *cp;
5495 
5496 	if (status == -ECANCELED)
5497 		return;
5498 
5499 	hci_dev_lock(hdev);
5500 
5501 	cp = cmd->param;
5502 
5503 	rp.monitor_handle = cp->monitor_handle;
5504 
5505 	if (!status) {
5506 		mgmt_adv_monitor_removed(cmd->sk, hdev, cp->monitor_handle);
5507 		hci_update_passive_scan(hdev);
5508 	}
5509 
5510 	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
5511 			  mgmt_status(status), &rp, sizeof(rp));
5512 	mgmt_pending_free(cmd);
5513 
5514 	hci_dev_unlock(hdev);
5515 	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5516 		   rp.monitor_handle, status);
5517 }
5518 
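/* A monitor handle of 0 acts as a wildcard and removes all registered
 * monitors; any other value removes only the matching monitor.
 */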
5519 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5520 {
5521 	struct mgmt_pending_cmd *cmd = data;
5522 	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5523 	u16 handle = __le16_to_cpu(cp->monitor_handle);
5524 
5525 	if (!handle)
5526 		return hci_remove_all_adv_monitor(hdev);
5527 
5528 	return hci_remove_single_adv_monitor(hdev, handle);
5529 }
5530 
5531 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5532 			      void *data, u16 len)
5533 {
5534 	struct mgmt_pending_cmd *cmd;
5535 	int err, status;
5536 
5537 	hci_dev_lock(hdev);
5538 
5539 	if (pending_find(MGMT_OP_SET_LE, hdev) ||
5540 	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5541 	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5542 		status = MGMT_STATUS_BUSY;
5543 		goto unlock;
5544 	}
5545 
5546 	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5547 	if (!cmd) {
5548 		status = MGMT_STATUS_NO_RESOURCES;
5549 		goto unlock;
5550 	}
5551 
5552 	err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
5553 				  mgmt_remove_adv_monitor_complete);
5554 
5555 	if (err) {
5556 		mgmt_pending_free(cmd);
5557 
5558 		if (err == -ENOMEM)
5559 			status = MGMT_STATUS_NO_RESOURCES;
5560 		else
5561 			status = MGMT_STATUS_FAILED;
5562 
5563 		goto unlock;
5564 	}
5565 
5566 	hci_dev_unlock(hdev);
5567 
5568 	return 0;
5569 
5570 unlock:
5571 	hci_dev_unlock(hdev);
5572 	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5573 			       status);
5574 }
5575 
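/* Complete a Read Local OOB Data request. Controllers without BR/EDR
 * Secure Connections only return the P-192 hash and randomizer, in
 * which case the P-256 fields are stripped from the response.
 */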
5576 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data,
5577 					 int err)
5578 {
5579 	struct mgmt_rp_read_local_oob_data mgmt_rp;
5580 	size_t rp_size = sizeof(mgmt_rp);
5581 	struct mgmt_pending_cmd *cmd = data;
5582 	struct sk_buff *skb = cmd->skb;
5583 	u8 status = mgmt_status(err);
5584 
5585 	if (!status) {
5586 		if (!skb)
5587 			status = MGMT_STATUS_FAILED;
5588 		else if (IS_ERR(skb))
5589 			status = mgmt_status(PTR_ERR(skb));
5590 		else
5591 			status = mgmt_status(skb->data[0]);
5592 	}
5593 
5594 	bt_dev_dbg(hdev, "status %d", status);
5595 
5596 	if (status) {
5597 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5598 				status);
5599 		goto remove;
5600 	}
5601 
5602 	memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5603 
5604 	if (!bredr_sc_enabled(hdev)) {
5605 		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5606 
5607 		if (skb->len < sizeof(*rp)) {
5608 			mgmt_cmd_status(cmd->sk, hdev->id,
5609 					MGMT_OP_READ_LOCAL_OOB_DATA,
5610 					MGMT_STATUS_FAILED);
5611 			goto remove;
5612 		}
5613 
5614 		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5615 		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5616 
5617 		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5618 	} else {
5619 		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5620 
5621 		if (skb->len < sizeof(*rp)) {
5622 			mgmt_cmd_status(cmd->sk, hdev->id,
5623 					MGMT_OP_READ_LOCAL_OOB_DATA,
5624 					MGMT_STATUS_FAILED);
5625 			goto remove;
5626 		}
5627 
5628 		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5629 		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5630 
5631 		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5632 		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5633 	}
5634 
5635 	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5636 			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5637 
5638 remove:
5639 	if (skb && !IS_ERR(skb))
5640 		kfree_skb(skb);
5641 
5642 	mgmt_pending_free(cmd);
5643 }
5644 
5645 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5646 {
5647 	struct mgmt_pending_cmd *cmd = data;
5648 
5649 	if (bredr_sc_enabled(hdev))
5650 		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5651 	else
5652 		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5653 
5654 	if (IS_ERR(cmd->skb))
5655 		return PTR_ERR(cmd->skb);
5656 	else
5657 		return 0;
5658 }
5659 
5660 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5661 			       void *data, u16 data_len)
5662 {
5663 	struct mgmt_pending_cmd *cmd;
5664 	int err;
5665 
5666 	bt_dev_dbg(hdev, "sock %p", sk);
5667 
5668 	hci_dev_lock(hdev);
5669 
5670 	if (!hdev_is_powered(hdev)) {
5671 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5672 				      MGMT_STATUS_NOT_POWERED);
5673 		goto unlock;
5674 	}
5675 
5676 	if (!lmp_ssp_capable(hdev)) {
5677 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5678 				      MGMT_STATUS_NOT_SUPPORTED);
5679 		goto unlock;
5680 	}
5681 
5682 	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5683 	if (!cmd)
5684 		err = -ENOMEM;
5685 	else
5686 		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5687 					 read_local_oob_data_complete);
5688 
5689 	if (err < 0) {
5690 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5691 				      MGMT_STATUS_FAILED);
5692 
5693 		if (cmd)
5694 			mgmt_pending_free(cmd);
5695 	}
5696 
5697 unlock:
5698 	hci_dev_unlock(hdev);
5699 	return err;
5700 }
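
/* The pattern above recurs for most asynchronous commands in this
 * file: allocate a struct mgmt_pending_cmd, queue a *_sync callback
 * via hci_cmd_sync_queue() and let the completion callback send the
 * final status or reply and free the pending command.  A minimal
 * sketch (hypothetical opcode and callbacks, illustration only):
 *
 *	cmd = mgmt_pending_new(sk, MGMT_OP_FOO, hdev, data, len);
 *	if (!cmd)
 *		return -ENOMEM;
 *	err = hci_cmd_sync_queue(hdev, foo_sync, cmd, foo_complete);
 *	if (err < 0)
 *		mgmt_pending_free(cmd);
 */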
5701 
5702 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5703 			       void *data, u16 len)
5704 {
5705 	struct mgmt_addr_info *addr = data;
5706 	int err;
5707 
5708 	bt_dev_dbg(hdev, "sock %p", sk);
5709 
5710 	if (!bdaddr_type_is_valid(addr->type))
5711 		return mgmt_cmd_complete(sk, hdev->id,
5712 					 MGMT_OP_ADD_REMOTE_OOB_DATA,
5713 					 MGMT_STATUS_INVALID_PARAMS,
5714 					 addr, sizeof(*addr));
5715 
5716 	hci_dev_lock(hdev);
5717 
5718 	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5719 		struct mgmt_cp_add_remote_oob_data *cp = data;
5720 		u8 status;
5721 
5722 		if (cp->addr.type != BDADDR_BREDR) {
5723 			err = mgmt_cmd_complete(sk, hdev->id,
5724 						MGMT_OP_ADD_REMOTE_OOB_DATA,
5725 						MGMT_STATUS_INVALID_PARAMS,
5726 						&cp->addr, sizeof(cp->addr));
5727 			goto unlock;
5728 		}
5729 
5730 		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5731 					      cp->addr.type, cp->hash,
5732 					      cp->rand, NULL, NULL);
5733 		if (err < 0)
5734 			status = MGMT_STATUS_FAILED;
5735 		else
5736 			status = MGMT_STATUS_SUCCESS;
5737 
5738 		err = mgmt_cmd_complete(sk, hdev->id,
5739 					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5740 					&cp->addr, sizeof(cp->addr));
5741 	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5742 		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5743 		u8 *rand192, *hash192, *rand256, *hash256;
5744 		u8 status;
5745 
5746 		if (bdaddr_type_is_le(cp->addr.type)) {
5747 			/* Enforce zero-valued 192-bit parameters as
5748 			 * long as legacy SMP OOB isn't implemented.
5749 			 */
5750 			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5751 			    memcmp(cp->hash192, ZERO_KEY, 16)) {
5752 				err = mgmt_cmd_complete(sk, hdev->id,
5753 							MGMT_OP_ADD_REMOTE_OOB_DATA,
5754 							MGMT_STATUS_INVALID_PARAMS,
5755 							addr, sizeof(*addr));
5756 				goto unlock;
5757 			}
5758 
5759 			rand192 = NULL;
5760 			hash192 = NULL;
5761 		} else {
5762 			/* If one of the P-192 values is set to zero,
5763 			 * just disable OOB data for P-192.
5764 			 */
5765 			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5766 			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
5767 				rand192 = NULL;
5768 				hash192 = NULL;
5769 			} else {
5770 				rand192 = cp->rand192;
5771 				hash192 = cp->hash192;
5772 			}
5773 		}
5774 
5775 		/* If one of the P-256 values is set to zero, just
5776 		 * disable OOB data for P-256.
5777 		 */
5778 		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5779 		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
5780 			rand256 = NULL;
5781 			hash256 = NULL;
5782 		} else {
5783 			rand256 = cp->rand256;
5784 			hash256 = cp->hash256;
5785 		}
5786 
5787 		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5788 					      cp->addr.type, hash192, rand192,
5789 					      hash256, rand256);
5790 		if (err < 0)
5791 			status = MGMT_STATUS_FAILED;
5792 		else
5793 			status = MGMT_STATUS_SUCCESS;
5794 
5795 		err = mgmt_cmd_complete(sk, hdev->id,
5796 					MGMT_OP_ADD_REMOTE_OOB_DATA,
5797 					status, &cp->addr, sizeof(cp->addr));
5798 	} else {
5799 		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5800 			   len);
5801 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5802 				      MGMT_STATUS_INVALID_PARAMS);
5803 	}
5804 
5805 unlock:
5806 	hci_dev_unlock(hdev);
5807 	return err;
5808 }
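
/* Two fixed command sizes are accepted above: the legacy form carries
 * a single P-192 hash/randomizer pair, while the extended form carries
 * both the P-192 and the P-256 pairs.  In the extended form an
 * all-zero pair (ZERO_KEY) means "no OOB data for that curve".
 */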
5809 
5810 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5811 				  void *data, u16 len)
5812 {
5813 	struct mgmt_cp_remove_remote_oob_data *cp = data;
5814 	u8 status;
5815 	int err;
5816 
5817 	bt_dev_dbg(hdev, "sock %p", sk);
5818 
5819 	if (cp->addr.type != BDADDR_BREDR)
5820 		return mgmt_cmd_complete(sk, hdev->id,
5821 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5822 					 MGMT_STATUS_INVALID_PARAMS,
5823 					 &cp->addr, sizeof(cp->addr));
5824 
5825 	hci_dev_lock(hdev);
5826 
5827 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5828 		hci_remote_oob_data_clear(hdev);
5829 		status = MGMT_STATUS_SUCCESS;
5830 		goto done;
5831 	}
5832 
5833 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5834 	if (err < 0)
5835 		status = MGMT_STATUS_INVALID_PARAMS;
5836 	else
5837 		status = MGMT_STATUS_SUCCESS;
5838 
5839 done:
5840 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5841 				status, &cp->addr, sizeof(cp->addr));
5842 
5843 	hci_dev_unlock(hdev);
5844 	return err;
5845 }
5846 
5847 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5848 				    uint8_t *mgmt_status)
5849 {
5850 	switch (type) {
5851 	case DISCOV_TYPE_LE:
5852 		*mgmt_status = mgmt_le_support(hdev);
5853 		if (*mgmt_status)
5854 			return false;
5855 		break;
5856 	case DISCOV_TYPE_INTERLEAVED:
5857 		*mgmt_status = mgmt_le_support(hdev);
5858 		if (*mgmt_status)
5859 			return false;
5860 		fallthrough;
5861 	case DISCOV_TYPE_BREDR:
5862 		*mgmt_status = mgmt_bredr_support(hdev);
5863 		if (*mgmt_status)
5864 			return false;
5865 		break;
5866 	default:
5867 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5868 		return false;
5869 	}
5870 
5871 	return true;
5872 }
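
/* Note the fallthrough above: DISCOV_TYPE_INTERLEAVED requires both LE
 * and BR/EDR support, while DISCOV_TYPE_LE and DISCOV_TYPE_BREDR only
 * need their respective transport.  On a BR/EDR-only controller, for
 * example, both LE and interleaved discovery fail with the status
 * returned by mgmt_le_support().
 */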
5873 
5874 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5875 {
5876 	struct mgmt_pending_cmd *cmd = data;
5877 
5878 	bt_dev_dbg(hdev, "err %d", err);
5879 
5880 	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
5881 		return;
5882 
5883 	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
5884 			  cmd->param, 1);
5885 	mgmt_pending_free(cmd);
5886 
5887 	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
5888 				DISCOVERY_FINDING);
5889 }
5890 
5891 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5892 {
5893 	if (!mgmt_pending_listed(hdev, data))
5894 		return -ECANCELED;
5895 
5896 	return hci_start_discovery_sync(hdev);
5897 }
5898 
5899 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5900 				    u16 op, void *data, u16 len)
5901 {
5902 	struct mgmt_cp_start_discovery *cp = data;
5903 	struct mgmt_pending_cmd *cmd;
5904 	u8 status;
5905 	int err;
5906 
5907 	bt_dev_dbg(hdev, "sock %p", sk);
5908 
5909 	hci_dev_lock(hdev);
5910 
5911 	if (!hdev_is_powered(hdev)) {
5912 		err = mgmt_cmd_complete(sk, hdev->id, op,
5913 					MGMT_STATUS_NOT_POWERED,
5914 					&cp->type, sizeof(cp->type));
5915 		goto failed;
5916 	}
5917 
5918 	if (hdev->discovery.state != DISCOVERY_STOPPED ||
5919 	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5920 		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5921 					&cp->type, sizeof(cp->type));
5922 		goto failed;
5923 	}
5924 
5925 	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5926 		err = mgmt_cmd_complete(sk, hdev->id, op, status,
5927 					&cp->type, sizeof(cp->type));
5928 		goto failed;
5929 	}
5930 
5931 	/* Can't start discovery when it is paused */
5932 	if (hdev->discovery_paused) {
5933 		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5934 					&cp->type, sizeof(cp->type));
5935 		goto failed;
5936 	}
5937 
5938 	/* Clear the discovery filter first to free any previously
5939 	 * allocated memory for the UUID list.
5940 	 */
5941 	hci_discovery_filter_clear(hdev);
5942 
5943 	hdev->discovery.type = cp->type;
5944 	hdev->discovery.report_invalid_rssi = false;
5945 	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5946 		hdev->discovery.limited = true;
5947 	else
5948 		hdev->discovery.limited = false;
5949 
5950 	cmd = mgmt_pending_add(sk, op, hdev, data, len);
5951 	if (!cmd) {
5952 		err = -ENOMEM;
5953 		goto failed;
5954 	}
5955 
5956 	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5957 				 start_discovery_complete);
5958 	if (err < 0) {
5959 		mgmt_pending_remove(cmd);
5960 		goto failed;
5961 	}
5962 
5963 	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5964 
5965 failed:
5966 	hci_dev_unlock(hdev);
5967 	return err;
5968 }
5969 
5970 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5971 			   void *data, u16 len)
5972 {
5973 	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5974 					data, len);
5975 }
5976 
5977 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5978 				   void *data, u16 len)
5979 {
5980 	return start_discovery_internal(sk, hdev,
5981 					MGMT_OP_START_LIMITED_DISCOVERY,
5982 					data, len);
5983 }
5984 
5985 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5986 				   void *data, u16 len)
5987 {
5988 	struct mgmt_cp_start_service_discovery *cp = data;
5989 	struct mgmt_pending_cmd *cmd;
5990 	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5991 	u16 uuid_count, expected_len;
5992 	u8 status;
5993 	int err;
5994 
5995 	bt_dev_dbg(hdev, "sock %p", sk);
5996 
5997 	hci_dev_lock(hdev);
5998 
5999 	if (!hdev_is_powered(hdev)) {
6000 		err = mgmt_cmd_complete(sk, hdev->id,
6001 					MGMT_OP_START_SERVICE_DISCOVERY,
6002 					MGMT_STATUS_NOT_POWERED,
6003 					&cp->type, sizeof(cp->type));
6004 		goto failed;
6005 	}
6006 
6007 	if (hdev->discovery.state != DISCOVERY_STOPPED ||
6008 	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
6009 		err = mgmt_cmd_complete(sk, hdev->id,
6010 					MGMT_OP_START_SERVICE_DISCOVERY,
6011 					MGMT_STATUS_BUSY, &cp->type,
6012 					sizeof(cp->type));
6013 		goto failed;
6014 	}
6015 
6016 	if (hdev->discovery_paused) {
6017 		err = mgmt_cmd_complete(sk, hdev->id,
6018 					MGMT_OP_START_SERVICE_DISCOVERY,
6019 					MGMT_STATUS_BUSY, &cp->type,
6020 					sizeof(cp->type));
6021 		goto failed;
6022 	}
6023 
6024 	uuid_count = __le16_to_cpu(cp->uuid_count);
6025 	if (uuid_count > max_uuid_count) {
6026 		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
6027 			   uuid_count);
6028 		err = mgmt_cmd_complete(sk, hdev->id,
6029 					MGMT_OP_START_SERVICE_DISCOVERY,
6030 					MGMT_STATUS_INVALID_PARAMS, &cp->type,
6031 					sizeof(cp->type));
6032 		goto failed;
6033 	}
6034 
6035 	expected_len = sizeof(*cp) + uuid_count * 16;
6036 	if (expected_len != len) {
6037 		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
6038 			   expected_len, len);
6039 		err = mgmt_cmd_complete(sk, hdev->id,
6040 					MGMT_OP_START_SERVICE_DISCOVERY,
6041 					MGMT_STATUS_INVALID_PARAMS, &cp->type,
6042 					sizeof(cp->type));
6043 		goto failed;
6044 	}
6045 
6046 	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
6047 		err = mgmt_cmd_complete(sk, hdev->id,
6048 					MGMT_OP_START_SERVICE_DISCOVERY,
6049 					status, &cp->type, sizeof(cp->type));
6050 		goto failed;
6051 	}
6052 
6053 	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
6054 			       hdev, data, len);
6055 	if (!cmd) {
6056 		err = -ENOMEM;
6057 		goto failed;
6058 	}
6059 
6060 	/* Clear the discovery filter first to free any previously
6061 	 * allocated memory for the UUID list.
6062 	 */
6063 	hci_discovery_filter_clear(hdev);
6064 
6065 	hdev->discovery.result_filtering = true;
6066 	hdev->discovery.type = cp->type;
6067 	hdev->discovery.rssi = cp->rssi;
6068 	hdev->discovery.uuid_count = uuid_count;
6069 
6070 	if (uuid_count > 0) {
6071 		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
6072 						GFP_KERNEL);
6073 		if (!hdev->discovery.uuids) {
6074 			err = mgmt_cmd_complete(sk, hdev->id,
6075 						MGMT_OP_START_SERVICE_DISCOVERY,
6076 						MGMT_STATUS_FAILED,
6077 						&cp->type, sizeof(cp->type));
6078 			mgmt_pending_remove(cmd);
6079 			goto failed;
6080 		}
6081 	}
6082 
6083 	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6084 				 start_discovery_complete);
6085 	if (err < 0) {
6086 		mgmt_pending_remove(cmd);
6087 		goto failed;
6088 	}
6089 
6090 	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6091 
6092 failed:
6093 	hci_dev_unlock(hdev);
6094 	return err;
6095 }
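
/* Length check worked example: each filter UUID is stored as a 128-bit
 * (16 byte) value, so a Start Service Discovery request with
 * uuid_count == 2 must be exactly sizeof(*cp) + 2 * 16 bytes long.
 * Anything else is rejected with MGMT_STATUS_INVALID_PARAMS before the
 * existing discovery filter is touched.
 */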
6096 
6097 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6098 {
6099 	struct mgmt_pending_cmd *cmd = data;
6100 
6101 	if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
6102 		return;
6103 
6104 	bt_dev_dbg(hdev, "err %d", err);
6105 
6106 	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
6107 			  cmd->param, 1);
6108 	mgmt_pending_free(cmd);
6109 
6110 	if (!err)
6111 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6112 }
6113 
6114 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6115 {
6116 	if (!mgmt_pending_listed(hdev, data))
6117 		return -ECANCELED;
6118 
6119 	return hci_stop_discovery_sync(hdev);
6120 }
6121 
6122 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6123 			  u16 len)
6124 {
6125 	struct mgmt_cp_stop_discovery *mgmt_cp = data;
6126 	struct mgmt_pending_cmd *cmd;
6127 	int err;
6128 
6129 	bt_dev_dbg(hdev, "sock %p", sk);
6130 
6131 	hci_dev_lock(hdev);
6132 
6133 	if (!hci_discovery_active(hdev)) {
6134 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6135 					MGMT_STATUS_REJECTED, &mgmt_cp->type,
6136 					sizeof(mgmt_cp->type));
6137 		goto unlock;
6138 	}
6139 
6140 	if (hdev->discovery.type != mgmt_cp->type) {
6141 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6142 					MGMT_STATUS_INVALID_PARAMS,
6143 					&mgmt_cp->type, sizeof(mgmt_cp->type));
6144 		goto unlock;
6145 	}
6146 
6147 	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6148 	if (!cmd) {
6149 		err = -ENOMEM;
6150 		goto unlock;
6151 	}
6152 
6153 	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6154 				 stop_discovery_complete);
6155 	if (err < 0) {
6156 		mgmt_pending_remove(cmd);
6157 		goto unlock;
6158 	}
6159 
6160 	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6161 
6162 unlock:
6163 	hci_dev_unlock(hdev);
6164 	return err;
6165 }
6166 
6167 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6168 			u16 len)
6169 {
6170 	struct mgmt_cp_confirm_name *cp = data;
6171 	struct inquiry_entry *e;
6172 	int err;
6173 
6174 	bt_dev_dbg(hdev, "sock %p", sk);
6175 
6176 	hci_dev_lock(hdev);
6177 
6178 	if (!hci_discovery_active(hdev)) {
6179 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6180 					MGMT_STATUS_FAILED, &cp->addr,
6181 					sizeof(cp->addr));
6182 		goto failed;
6183 	}
6184 
6185 	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6186 	if (!e) {
6187 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6188 					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6189 					sizeof(cp->addr));
6190 		goto failed;
6191 	}
6192 
6193 	if (cp->name_known) {
6194 		e->name_state = NAME_KNOWN;
6195 		list_del(&e->list);
6196 	} else {
6197 		e->name_state = NAME_NEEDED;
6198 		hci_inquiry_cache_update_resolve(hdev, e);
6199 	}
6200 
6201 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6202 				&cp->addr, sizeof(cp->addr));
6203 
6204 failed:
6205 	hci_dev_unlock(hdev);
6206 	return err;
6207 }
6208 
6209 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6210 			u16 len)
6211 {
6212 	struct mgmt_cp_block_device *cp = data;
6213 	u8 status;
6214 	int err;
6215 
6216 	bt_dev_dbg(hdev, "sock %p", sk);
6217 
6218 	if (!bdaddr_type_is_valid(cp->addr.type))
6219 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6220 					 MGMT_STATUS_INVALID_PARAMS,
6221 					 &cp->addr, sizeof(cp->addr));
6222 
6223 	hci_dev_lock(hdev);
6224 
6225 	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6226 				  cp->addr.type);
6227 	if (err < 0) {
6228 		status = MGMT_STATUS_FAILED;
6229 		goto done;
6230 	}
6231 
6232 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6233 		   sk);
6234 	status = MGMT_STATUS_SUCCESS;
6235 
6236 done:
6237 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6238 				&cp->addr, sizeof(cp->addr));
6239 
6240 	hci_dev_unlock(hdev);
6241 
6242 	return err;
6243 }
6244 
6245 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6246 			  u16 len)
6247 {
6248 	struct mgmt_cp_unblock_device *cp = data;
6249 	u8 status;
6250 	int err;
6251 
6252 	bt_dev_dbg(hdev, "sock %p", sk);
6253 
6254 	if (!bdaddr_type_is_valid(cp->addr.type))
6255 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6256 					 MGMT_STATUS_INVALID_PARAMS,
6257 					 &cp->addr, sizeof(cp->addr));
6258 
6259 	hci_dev_lock(hdev);
6260 
6261 	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6262 				  cp->addr.type);
6263 	if (err < 0) {
6264 		status = MGMT_STATUS_INVALID_PARAMS;
6265 		goto done;
6266 	}
6267 
6268 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6269 		   sk);
6270 	status = MGMT_STATUS_SUCCESS;
6271 
6272 done:
6273 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6274 				&cp->addr, sizeof(cp->addr));
6275 
6276 	hci_dev_unlock(hdev);
6277 
6278 	return err;
6279 }
6280 
6281 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6282 {
6283 	return hci_update_eir_sync(hdev);
6284 }
6285 
6286 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6287 			 u16 len)
6288 {
6289 	struct mgmt_cp_set_device_id *cp = data;
6290 	int err;
6291 	__u16 source;
6292 
6293 	bt_dev_dbg(hdev, "sock %p", sk);
6294 
6295 	source = __le16_to_cpu(cp->source);
6296 
6297 	if (source > 0x0002)
6298 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6299 				       MGMT_STATUS_INVALID_PARAMS);
6300 
6301 	hci_dev_lock(hdev);
6302 
6303 	hdev->devid_source = source;
6304 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6305 	hdev->devid_product = __le16_to_cpu(cp->product);
6306 	hdev->devid_version = __le16_to_cpu(cp->version);
6307 
6308 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6309 				NULL, 0);
6310 
6311 	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6312 
6313 	hci_dev_unlock(hdev);
6314 
6315 	return err;
6316 }
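
/* Device ID source values accepted above, per the management API
 * documentation: 0x0000 disables the Device ID, 0x0001 selects a
 * Bluetooth SIG assigned vendor ID and 0x0002 a USB Implementer's
 * Forum assigned one, hence the "source > 0x0002" rejection.
 */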
6317 
6318 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6319 {
6320 	if (err)
6321 		bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6322 	else
6323 		bt_dev_dbg(hdev, "status %d", err);
6324 }
6325 
6326 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6327 {
6328 	struct mgmt_pending_cmd *cmd = data;
6329 	struct cmd_lookup match = { NULL, hdev };
6330 	u8 instance;
6331 	struct adv_info *adv_instance;
6332 	u8 status = mgmt_status(err);
6333 
6334 	if (err == -ECANCELED || !mgmt_pending_valid(hdev, data))
6335 		return;
6336 
6337 	if (status) {
6338 		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, status);
6339 		mgmt_pending_free(cmd);
6340 		return;
6341 	}
6342 
6343 	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6344 		hci_dev_set_flag(hdev, HCI_ADVERTISING);
6345 	else
6346 		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6347 
6348 	settings_rsp(cmd, &match);
6349 
6350 	new_settings(hdev, match.sk);
6351 
6352 	if (match.sk)
6353 		sock_put(match.sk);
6354 
6355 	/* If "Set Advertising" was just disabled and instance advertising was
6356 	 * set up earlier, then re-enable multi-instance advertising.
6357 	 */
6358 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6359 	    list_empty(&hdev->adv_instances))
6360 		return;
6361 
6362 	instance = hdev->cur_adv_instance;
6363 	if (!instance) {
6364 		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6365 							struct adv_info, list);
6366 		if (!adv_instance)
6367 			return;
6368 
6369 		instance = adv_instance->instance;
6370 	}
6371 
6372 	err = hci_schedule_adv_instance_sync(hdev, instance, true);
6373 
6374 	enable_advertising_instance(hdev, err);
6375 }
6376 
6377 static int set_adv_sync(struct hci_dev *hdev, void *data)
6378 {
6379 	struct mgmt_pending_cmd *cmd = data;
6380 	struct mgmt_mode cp;
6381 	u8 val;
6382 
6383 	mutex_lock(&hdev->mgmt_pending_lock);
6384 
6385 	if (!__mgmt_pending_listed(hdev, cmd)) {
6386 		mutex_unlock(&hdev->mgmt_pending_lock);
6387 		return -ECANCELED;
6388 	}
6389 
6390 	memcpy(&cp, cmd->param, sizeof(cp));
6391 
6392 	mutex_unlock(&hdev->mgmt_pending_lock);
6393 
6394 	val = !!cp.val;
6395 
6396 	if (cp.val == 0x02)
6397 		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6398 	else
6399 		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6400 
6401 	cancel_adv_timeout(hdev);
6402 
6403 	if (val) {
6404 		/* Switch to instance "0" for the Set Advertising setting.
6405 		 * We cannot use update_[adv|scan_rsp]_data() here as the
6406 		 * HCI_ADVERTISING flag is not yet set.
6407 		 */
6408 		hdev->cur_adv_instance = 0x00;
6409 
6410 		if (ext_adv_capable(hdev)) {
6411 			hci_start_ext_adv_sync(hdev, 0x00);
6412 		} else {
6413 			hci_update_adv_data_sync(hdev, 0x00);
6414 			hci_update_scan_rsp_data_sync(hdev, 0x00);
6415 			hci_enable_advertising_sync(hdev);
6416 		}
6417 	} else {
6418 		hci_disable_advertising_sync(hdev);
6419 	}
6420 
6421 	return 0;
6422 }
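
/* Value semantics above: 0x00 disables advertising, 0x01 enables it
 * and 0x02 enables it in connectable mode, which is why only the 0x02
 * case sets HCI_ADVERTISING_CONNECTABLE before (re)starting
 * advertising on instance 0x00.
 */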
6423 
6424 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6425 			   u16 len)
6426 {
6427 	struct mgmt_mode *cp = data;
6428 	struct mgmt_pending_cmd *cmd;
6429 	u8 val, status;
6430 	int err;
6431 
6432 	bt_dev_dbg(hdev, "sock %p", sk);
6433 
6434 	status = mgmt_le_support(hdev);
6435 	if (status)
6436 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6437 				       status);
6438 
6439 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6440 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6441 				       MGMT_STATUS_INVALID_PARAMS);
6442 
6443 	if (hdev->advertising_paused)
6444 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6445 				       MGMT_STATUS_BUSY);
6446 
6447 	hci_dev_lock(hdev);
6448 
6449 	val = !!cp->val;
6450 
6451 	/* The following conditions mean that we should not do any
6452 	 * HCI communication but instead directly send an mgmt
6453 	 * response to user space (after toggling the flag if
6454 	 * necessary).
6455 	 */
6456 	if (!hdev_is_powered(hdev) ||
6457 	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6458 	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6459 	    hci_dev_test_flag(hdev, HCI_MESH) ||
6460 	    hci_conn_num(hdev, LE_LINK) > 0 ||
6461 	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6462 	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6463 		bool changed;
6464 
6465 		if (cp->val) {
6466 			hdev->cur_adv_instance = 0x00;
6467 			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6468 			if (cp->val == 0x02)
6469 				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6470 			else
6471 				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6472 		} else {
6473 			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6474 			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6475 		}
6476 
6477 		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6478 		if (err < 0)
6479 			goto unlock;
6480 
6481 		if (changed)
6482 			err = new_settings(hdev, sk);
6483 
6484 		goto unlock;
6485 	}
6486 
6487 	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6488 	    pending_find(MGMT_OP_SET_LE, hdev)) {
6489 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6490 				      MGMT_STATUS_BUSY);
6491 		goto unlock;
6492 	}
6493 
6494 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6495 	if (!cmd)
6496 		err = -ENOMEM;
6497 	else
6498 		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6499 					 set_advertising_complete);
6500 
6501 	if (err < 0 && cmd)
6502 		mgmt_pending_remove(cmd);
6503 
6504 unlock:
6505 	hci_dev_unlock(hdev);
6506 	return err;
6507 }
6508 
6509 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6510 			      void *data, u16 len)
6511 {
6512 	struct mgmt_cp_set_static_address *cp = data;
6513 	int err;
6514 
6515 	bt_dev_dbg(hdev, "sock %p", sk);
6516 
6517 	if (!lmp_le_capable(hdev))
6518 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6519 				       MGMT_STATUS_NOT_SUPPORTED);
6520 
6521 	if (hdev_is_powered(hdev))
6522 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6523 				       MGMT_STATUS_REJECTED);
6524 
6525 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6526 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6527 			return mgmt_cmd_status(sk, hdev->id,
6528 					       MGMT_OP_SET_STATIC_ADDRESS,
6529 					       MGMT_STATUS_INVALID_PARAMS);
6530 
6531 		/* Two most significant bits shall be set */
6532 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6533 			return mgmt_cmd_status(sk, hdev->id,
6534 					       MGMT_OP_SET_STATIC_ADDRESS,
6535 					       MGMT_STATUS_INVALID_PARAMS);
6536 	}
6537 
6538 	hci_dev_lock(hdev);
6539 
6540 	bacpy(&hdev->static_addr, &cp->bdaddr);
6541 
6542 	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6543 	if (err < 0)
6544 		goto unlock;
6545 
6546 	err = new_settings(hdev, sk);
6547 
6548 unlock:
6549 	hci_dev_unlock(hdev);
6550 	return err;
6551 }
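
/* Static random addresses must have the two most significant bits of
 * the most significant byte set, as checked above.  As an illustrative
 * address (not from the code), C0:11:22:33:44:55 is acceptable while
 * 40:11:22:33:44:55 is not, since (0x40 & 0xc0) != 0xc0.  Setting
 * BDADDR_ANY clears the static address again.
 */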
6552 
6553 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6554 			   void *data, u16 len)
6555 {
6556 	struct mgmt_cp_set_scan_params *cp = data;
6557 	__u16 interval, window;
6558 	int err;
6559 
6560 	bt_dev_dbg(hdev, "sock %p", sk);
6561 
6562 	if (!lmp_le_capable(hdev))
6563 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6564 				       MGMT_STATUS_NOT_SUPPORTED);
6565 
6566 	/* Keep allowed ranges in sync with set_mesh() */
6567 	interval = __le16_to_cpu(cp->interval);
6568 
6569 	if (interval < 0x0004 || interval > 0x4000)
6570 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6571 				       MGMT_STATUS_INVALID_PARAMS);
6572 
6573 	window = __le16_to_cpu(cp->window);
6574 
6575 	if (window < 0x0004 || window > 0x4000)
6576 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6577 				       MGMT_STATUS_INVALID_PARAMS);
6578 
6579 	if (window > interval)
6580 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6581 				       MGMT_STATUS_INVALID_PARAMS);
6582 
6583 	hci_dev_lock(hdev);
6584 
6585 	hdev->le_scan_interval = interval;
6586 	hdev->le_scan_window = window;
6587 
6588 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6589 				NULL, 0);
6590 
6591 	/* If background scan is running, restart it so new parameters are
6592 	 * loaded.
6593 	 */
6594 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6595 	    hdev->discovery.state == DISCOVERY_STOPPED)
6596 		hci_update_passive_scan(hdev);
6597 
6598 	hci_dev_unlock(hdev);
6599 
6600 	return err;
6601 }
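
/* Scan parameter units: interval and window are expressed in 0.625 ms
 * slots, so the accepted range 0x0004-0x4000 corresponds to 2.5 ms up
 * to 10.24 s.  For example, interval = 0x0800 with window = 0x0100
 * means "scan for 160 ms out of every 1.28 s"; the window may never
 * exceed the interval.
 */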
6602 
6603 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6604 {
6605 	struct mgmt_pending_cmd *cmd = data;
6606 
6607 	bt_dev_dbg(hdev, "err %d", err);
6608 
6609 	if (err) {
6610 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6611 				mgmt_status(err));
6612 	} else {
6613 		struct mgmt_mode *cp = cmd->param;
6614 
6615 		if (cp->val)
6616 			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6617 		else
6618 			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6619 
6620 		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6621 		new_settings(hdev, cmd->sk);
6622 	}
6623 
6624 	mgmt_pending_free(cmd);
6625 }
6626 
6627 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6628 {
6629 	struct mgmt_pending_cmd *cmd = data;
6630 	struct mgmt_mode *cp = cmd->param;
6631 
6632 	return hci_write_fast_connectable_sync(hdev, cp->val);
6633 }
6634 
6635 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6636 				void *data, u16 len)
6637 {
6638 	struct mgmt_mode *cp = data;
6639 	struct mgmt_pending_cmd *cmd;
6640 	int err;
6641 
6642 	bt_dev_dbg(hdev, "sock %p", sk);
6643 
6644 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6645 	    hdev->hci_ver < BLUETOOTH_VER_1_2)
6646 		return mgmt_cmd_status(sk, hdev->id,
6647 				       MGMT_OP_SET_FAST_CONNECTABLE,
6648 				       MGMT_STATUS_NOT_SUPPORTED);
6649 
6650 	if (cp->val != 0x00 && cp->val != 0x01)
6651 		return mgmt_cmd_status(sk, hdev->id,
6652 				       MGMT_OP_SET_FAST_CONNECTABLE,
6653 				       MGMT_STATUS_INVALID_PARAMS);
6654 
6655 	hci_dev_lock(hdev);
6656 
6657 	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6658 		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6659 		goto unlock;
6660 	}
6661 
6662 	if (!hdev_is_powered(hdev)) {
6663 		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6664 		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6665 		new_settings(hdev, sk);
6666 		goto unlock;
6667 	}
6668 
6669 	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6670 			       len);
6671 	if (!cmd)
6672 		err = -ENOMEM;
6673 	else
6674 		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6675 					 fast_connectable_complete);
6676 
6677 	if (err < 0) {
6678 		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6679 				MGMT_STATUS_FAILED);
6680 
6681 		if (cmd)
6682 			mgmt_pending_free(cmd);
6683 	}
6684 
6685 unlock:
6686 	hci_dev_unlock(hdev);
6687 
6688 	return err;
6689 }
6690 
6691 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6692 {
6693 	struct mgmt_pending_cmd *cmd = data;
6694 
6695 	bt_dev_dbg(hdev, "err %d", err);
6696 
6697 	if (err) {
6698 		u8 mgmt_err = mgmt_status(err);
6699 
6700 		/* We need to restore the flag if related HCI commands
6701 		 * failed.
6702 		 */
6703 		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6704 
6705 		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
6706 	} else {
6707 		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6708 		new_settings(hdev, cmd->sk);
6709 	}
6710 
6711 	mgmt_pending_free(cmd);
6712 }
6713 
6714 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6715 {
6716 	int status;
6717 
6718 	status = hci_write_fast_connectable_sync(hdev, false);
6719 
6720 	if (!status)
6721 		status = hci_update_scan_sync(hdev);
6722 
6723 	/* Since only the advertising data flags will change, there
6724 	 * is no need to update the scan response data.
6725 	 */
6726 	if (!status)
6727 		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6728 
6729 	return status;
6730 }
6731 
6732 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6733 {
6734 	struct mgmt_mode *cp = data;
6735 	struct mgmt_pending_cmd *cmd;
6736 	int err;
6737 
6738 	bt_dev_dbg(hdev, "sock %p", sk);
6739 
6740 	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6741 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6742 				       MGMT_STATUS_NOT_SUPPORTED);
6743 
6744 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6745 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6746 				       MGMT_STATUS_REJECTED);
6747 
6748 	if (cp->val != 0x00 && cp->val != 0x01)
6749 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6750 				       MGMT_STATUS_INVALID_PARAMS);
6751 
6752 	hci_dev_lock(hdev);
6753 
6754 	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6755 		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6756 		goto unlock;
6757 	}
6758 
6759 	if (!hdev_is_powered(hdev)) {
6760 		if (!cp->val) {
6761 			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6762 			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6763 			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6764 			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6765 		}
6766 
6767 		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6768 
6769 		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6770 		if (err < 0)
6771 			goto unlock;
6772 
6773 		err = new_settings(hdev, sk);
6774 		goto unlock;
6775 	}
6776 
6777 	/* Reject disabling when powered on */
6778 	if (!cp->val) {
6779 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6780 				      MGMT_STATUS_REJECTED);
6781 		goto unlock;
6782 	} else {
6783 		/* When configuring a dual-mode controller to operate
6784 		 * with LE only and using a static address, then switching
6785 		 * BR/EDR back on is not allowed.
6786 		 *
6787 		 * Dual-mode controllers shall operate with the public
6788 		 * address as its identity address for BR/EDR and LE. So
6789 		 * reject the attempt to create an invalid configuration.
6790 		 *
6791 		 * The same restrictions apply when secure connections
6792 		 * have been enabled. For BR/EDR this is a controller feature
6793 		 * while for LE it is a host stack feature. This means that
6794 		 * switching BR/EDR back on when secure connections have been
6795 		 * enabled is not a supported transaction.
6796 		 */
6797 		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6798 		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6799 		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6800 			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6801 					      MGMT_STATUS_REJECTED);
6802 			goto unlock;
6803 		}
6804 	}
6805 
6806 	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6807 	if (!cmd)
6808 		err = -ENOMEM;
6809 	else
6810 		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6811 					 set_bredr_complete);
6812 
6813 	if (err < 0) {
6814 		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6815 				MGMT_STATUS_FAILED);
6816 		if (cmd)
6817 			mgmt_pending_free(cmd);
6818 
6819 		goto unlock;
6820 	}
6821 
6822 	/* We need to flip the bit already here so that
6823 	 * hci_req_update_adv_data generates the correct flags.
6824 	 */
6825 	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6826 
6827 unlock:
6828 	hci_dev_unlock(hdev);
6829 	return err;
6830 }
6831 
6832 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6833 {
6834 	struct mgmt_pending_cmd *cmd = data;
6835 	struct mgmt_mode *cp;
6836 
6837 	bt_dev_dbg(hdev, "err %d", err);
6838 
6839 	if (err) {
6840 		u8 mgmt_err = mgmt_status(err);
6841 
6842 		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
6843 		goto done;
6844 	}
6845 
6846 	cp = cmd->param;
6847 
6848 	switch (cp->val) {
6849 	case 0x00:
6850 		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6851 		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6852 		break;
6853 	case 0x01:
6854 		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6855 		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6856 		break;
6857 	case 0x02:
6858 		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6859 		hci_dev_set_flag(hdev, HCI_SC_ONLY);
6860 		break;
6861 	}
6862 
6863 	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6864 	new_settings(hdev, cmd->sk);
6865 
6866 done:
6867 	mgmt_pending_free(cmd);
6868 }
6869 
6870 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6871 {
6872 	struct mgmt_pending_cmd *cmd = data;
6873 	struct mgmt_mode *cp = cmd->param;
6874 	u8 val = !!cp->val;
6875 
6876 	/* Force write of val */
6877 	hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6878 
6879 	return hci_write_sc_support_sync(hdev, val);
6880 }
6881 
6882 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6883 			   void *data, u16 len)
6884 {
6885 	struct mgmt_mode *cp = data;
6886 	struct mgmt_pending_cmd *cmd;
6887 	u8 val;
6888 	int err;
6889 
6890 	bt_dev_dbg(hdev, "sock %p", sk);
6891 
6892 	if (!lmp_sc_capable(hdev) &&
6893 	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6894 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6895 				       MGMT_STATUS_NOT_SUPPORTED);
6896 
6897 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6898 	    lmp_sc_capable(hdev) &&
6899 	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6900 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6901 				       MGMT_STATUS_REJECTED);
6902 
6903 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6904 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6905 				       MGMT_STATUS_INVALID_PARAMS);
6906 
6907 	hci_dev_lock(hdev);
6908 
6909 	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6910 	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6911 		bool changed;
6912 
6913 		if (cp->val) {
6914 			changed = !hci_dev_test_and_set_flag(hdev,
6915 							     HCI_SC_ENABLED);
6916 			if (cp->val == 0x02)
6917 				hci_dev_set_flag(hdev, HCI_SC_ONLY);
6918 			else
6919 				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6920 		} else {
6921 			changed = hci_dev_test_and_clear_flag(hdev,
6922 							      HCI_SC_ENABLED);
6923 			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6924 		}
6925 
6926 		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6927 		if (err < 0)
6928 			goto failed;
6929 
6930 		if (changed)
6931 			err = new_settings(hdev, sk);
6932 
6933 		goto failed;
6934 	}
6935 
6936 	val = !!cp->val;
6937 
6938 	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6939 	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6940 		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6941 		goto failed;
6942 	}
6943 
6944 	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6945 	if (!cmd)
6946 		err = -ENOMEM;
6947 	else
6948 		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6949 					 set_secure_conn_complete);
6950 
6951 	if (err < 0) {
6952 		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6953 				MGMT_STATUS_FAILED);
6954 		if (cmd)
6955 			mgmt_pending_free(cmd);
6956 	}
6957 
6958 failed:
6959 	hci_dev_unlock(hdev);
6960 	return err;
6961 }
6962 
6963 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6964 			  void *data, u16 len)
6965 {
6966 	struct mgmt_mode *cp = data;
6967 	bool changed, use_changed;
6968 	int err;
6969 
6970 	bt_dev_dbg(hdev, "sock %p", sk);
6971 
6972 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6973 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6974 				       MGMT_STATUS_INVALID_PARAMS);
6975 
6976 	hci_dev_lock(hdev);
6977 
6978 	if (cp->val)
6979 		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6980 	else
6981 		changed = hci_dev_test_and_clear_flag(hdev,
6982 						      HCI_KEEP_DEBUG_KEYS);
6983 
6984 	if (cp->val == 0x02)
6985 		use_changed = !hci_dev_test_and_set_flag(hdev,
6986 							 HCI_USE_DEBUG_KEYS);
6987 	else
6988 		use_changed = hci_dev_test_and_clear_flag(hdev,
6989 							  HCI_USE_DEBUG_KEYS);
6990 
6991 	if (hdev_is_powered(hdev) && use_changed &&
6992 	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6993 		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6994 		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6995 			     sizeof(mode), &mode);
6996 	}
6997 
6998 	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6999 	if (err < 0)
7000 		goto unlock;
7001 
7002 	if (changed)
7003 		err = new_settings(hdev, sk);
7004 
7005 unlock:
7006 	hci_dev_unlock(hdev);
7007 	return err;
7008 }
7009 
7010 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7011 		       u16 len)
7012 {
7013 	struct mgmt_cp_set_privacy *cp = cp_data;
7014 	bool changed;
7015 	int err;
7016 
7017 	bt_dev_dbg(hdev, "sock %p", sk);
7018 
7019 	if (!lmp_le_capable(hdev))
7020 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7021 				       MGMT_STATUS_NOT_SUPPORTED);
7022 
7023 	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
7024 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7025 				       MGMT_STATUS_INVALID_PARAMS);
7026 
7027 	if (hdev_is_powered(hdev))
7028 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7029 				       MGMT_STATUS_REJECTED);
7030 
7031 	hci_dev_lock(hdev);
7032 
7033 	/* If user space supports this command it is also expected to
7034 	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
7035 	 */
7036 	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7037 
7038 	if (cp->privacy) {
7039 		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
7040 		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
7041 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
7042 		hci_adv_instances_set_rpa_expired(hdev, true);
7043 		if (cp->privacy == 0x02)
7044 			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
7045 		else
7046 			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7047 	} else {
7048 		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
7049 		memset(hdev->irk, 0, sizeof(hdev->irk));
7050 		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
7051 		hci_adv_instances_set_rpa_expired(hdev, false);
7052 		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7053 	}
7054 
7055 	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
7056 	if (err < 0)
7057 		goto unlock;
7058 
7059 	if (changed)
7060 		err = new_settings(hdev, sk);
7061 
7062 unlock:
7063 	hci_dev_unlock(hdev);
7064 	return err;
7065 }
7066 
7067 static bool irk_is_valid(struct mgmt_irk_info *irk)
7068 {
7069 	switch (irk->addr.type) {
7070 	case BDADDR_LE_PUBLIC:
7071 		return true;
7072 
7073 	case BDADDR_LE_RANDOM:
7074 		/* Two most significant bits shall be set */
7075 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7076 			return false;
7077 		return true;
7078 	}
7079 
7080 	return false;
7081 }
7082 
7083 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7084 		     u16 len)
7085 {
7086 	struct mgmt_cp_load_irks *cp = cp_data;
7087 	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7088 				   sizeof(struct mgmt_irk_info));
7089 	u16 irk_count, expected_len;
7090 	int i, err;
7091 
7092 	bt_dev_dbg(hdev, "sock %p", sk);
7093 
7094 	if (!lmp_le_capable(hdev))
7095 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7096 				       MGMT_STATUS_NOT_SUPPORTED);
7097 
7098 	irk_count = __le16_to_cpu(cp->irk_count);
7099 	if (irk_count > max_irk_count) {
7100 		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7101 			   irk_count);
7102 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7103 				       MGMT_STATUS_INVALID_PARAMS);
7104 	}
7105 
7106 	expected_len = struct_size(cp, irks, irk_count);
7107 	if (expected_len != len) {
7108 		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7109 			   expected_len, len);
7110 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7111 				       MGMT_STATUS_INVALID_PARAMS);
7112 	}
7113 
7114 	bt_dev_dbg(hdev, "irk_count %u", irk_count);
7115 
7116 	for (i = 0; i < irk_count; i++) {
7117 		struct mgmt_irk_info *key = &cp->irks[i];
7118 
7119 		if (!irk_is_valid(key))
7120 			return mgmt_cmd_status(sk, hdev->id,
7121 					       MGMT_OP_LOAD_IRKS,
7122 					       MGMT_STATUS_INVALID_PARAMS);
7123 	}
7124 
7125 	hci_dev_lock(hdev);
7126 
7127 	hci_smp_irks_clear(hdev);
7128 
7129 	for (i = 0; i < irk_count; i++) {
7130 		struct mgmt_irk_info *irk = &cp->irks[i];
7131 
7132 		if (hci_is_blocked_key(hdev,
7133 				       HCI_BLOCKED_KEY_TYPE_IRK,
7134 				       irk->val)) {
7135 			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7136 				    &irk->addr.bdaddr);
7137 			continue;
7138 		}
7139 
7140 		hci_add_irk(hdev, &irk->addr.bdaddr,
7141 			    le_addr_type(irk->addr.type), irk->val,
7142 			    BDADDR_ANY);
7143 	}
7144 
7145 	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7146 
7147 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7148 
7149 	hci_dev_unlock(hdev);
7150 
7151 	return err;
7152 }
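
/* Payload sizing, for illustration: struct mgmt_irk_info is a 7 byte
 * mgmt_addr_info plus a 16 byte IRK value, 23 bytes in total, so
 * loading two IRKs requires len == sizeof(*cp) + 2 * 23.
 * struct_size() performs that multiply-and-add with overflow checking.
 */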
7153 
7154 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7155 {
7156 	if (key->initiator != 0x00 && key->initiator != 0x01)
7157 		return false;
7158 
7159 	switch (key->addr.type) {
7160 	case BDADDR_LE_PUBLIC:
7161 		return true;
7162 
7163 	case BDADDR_LE_RANDOM:
7164 		/* Two most significant bits shall be set */
7165 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7166 			return false;
7167 		return true;
7168 	}
7169 
7170 	return false;
7171 }
7172 
7173 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7174 			       void *cp_data, u16 len)
7175 {
7176 	struct mgmt_cp_load_long_term_keys *cp = cp_data;
7177 	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7178 				   sizeof(struct mgmt_ltk_info));
7179 	u16 key_count, expected_len;
7180 	int i, err;
7181 
7182 	bt_dev_dbg(hdev, "sock %p", sk);
7183 
7184 	if (!lmp_le_capable(hdev))
7185 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7186 				       MGMT_STATUS_NOT_SUPPORTED);
7187 
7188 	key_count = __le16_to_cpu(cp->key_count);
7189 	if (key_count > max_key_count) {
7190 		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7191 			   key_count);
7192 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7193 				       MGMT_STATUS_INVALID_PARAMS);
7194 	}
7195 
7196 	expected_len = struct_size(cp, keys, key_count);
7197 	if (expected_len != len) {
7198 		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
7199 			   expected_len, len);
7200 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7201 				       MGMT_STATUS_INVALID_PARAMS);
7202 	}
7203 
7204 	bt_dev_dbg(hdev, "key_count %u", key_count);
7205 
7206 	hci_dev_lock(hdev);
7207 
7208 	hci_smp_ltks_clear(hdev);
7209 
7210 	for (i = 0; i < key_count; i++) {
7211 		struct mgmt_ltk_info *key = &cp->keys[i];
7212 		u8 type, authenticated;
7213 
7214 		if (hci_is_blocked_key(hdev,
7215 				       HCI_BLOCKED_KEY_TYPE_LTK,
7216 				       key->val)) {
7217 			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
7218 				    &key->addr.bdaddr);
7219 			continue;
7220 		}
7221 
7222 		if (!ltk_is_valid(key)) {
7223 			bt_dev_warn(hdev, "Invalid LTK for %pMR",
7224 				    &key->addr.bdaddr);
7225 			continue;
7226 		}
7227 
7228 		switch (key->type) {
7229 		case MGMT_LTK_UNAUTHENTICATED:
7230 			authenticated = 0x00;
7231 			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7232 			break;
7233 		case MGMT_LTK_AUTHENTICATED:
7234 			authenticated = 0x01;
7235 			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7236 			break;
7237 		case MGMT_LTK_P256_UNAUTH:
7238 			authenticated = 0x00;
7239 			type = SMP_LTK_P256;
7240 			break;
7241 		case MGMT_LTK_P256_AUTH:
7242 			authenticated = 0x01;
7243 			type = SMP_LTK_P256;
7244 			break;
7245 		case MGMT_LTK_P256_DEBUG:
7246 			authenticated = 0x00;
7247 			type = SMP_LTK_P256_DEBUG;
7248 			fallthrough;
7249 		default:
7250 			continue;
7251 		}
7252 
7253 		hci_add_ltk(hdev, &key->addr.bdaddr,
7254 			    le_addr_type(key->addr.type), type, authenticated,
7255 			    key->val, key->enc_size, key->ediv, key->rand);
7256 	}
7257 
7258 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7259 			   NULL, 0);
7260 
7261 	hci_dev_unlock(hdev);
7262 
7263 	return err;
7264 }
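
/* The switch above maps mgmt key types onto SMP ones: legacy pairing
 * keys keep their initiator/responder role, P-256 keys are role-less,
 * and debug keys fall through into "default: continue" so they are
 * never loaded into the kernel.
 */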
7265 
7266 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7267 {
7268 	struct mgmt_pending_cmd *cmd = data;
7269 	struct hci_conn *conn = cmd->user_data;
7270 	struct mgmt_cp_get_conn_info *cp = cmd->param;
7271 	struct mgmt_rp_get_conn_info rp;
7272 	u8 status;
7273 
7274 	bt_dev_dbg(hdev, "err %d", err);
7275 
7276 	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7277 
7278 	status = mgmt_status(err);
7279 	if (status == MGMT_STATUS_SUCCESS) {
7280 		rp.rssi = conn->rssi;
7281 		rp.tx_power = conn->tx_power;
7282 		rp.max_tx_power = conn->max_tx_power;
7283 	} else {
7284 		rp.rssi = HCI_RSSI_INVALID;
7285 		rp.tx_power = HCI_TX_POWER_INVALID;
7286 		rp.max_tx_power = HCI_TX_POWER_INVALID;
7287 	}
7288 
7289 	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, MGMT_OP_GET_CONN_INFO, status,
7290 			  &rp, sizeof(rp));
7291 
7292 	mgmt_pending_free(cmd);
7293 }
7294 
7295 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7296 {
7297 	struct mgmt_pending_cmd *cmd = data;
7298 	struct mgmt_cp_get_conn_info *cp = cmd->param;
7299 	struct hci_conn *conn;
7300 	int err;
7301 	__le16   handle;
7302 
7303 	/* Make sure we are still connected */
7304 	if (cp->addr.type == BDADDR_BREDR)
7305 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7306 					       &cp->addr.bdaddr);
7307 	else
7308 		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7309 
7310 	if (!conn || conn->state != BT_CONNECTED)
7311 		return MGMT_STATUS_NOT_CONNECTED;
7312 
7313 	cmd->user_data = conn;
7314 	handle = cpu_to_le16(conn->handle);
7315 
7316 	/* Refresh RSSI each time */
7317 	err = hci_read_rssi_sync(hdev, handle);
7318 
7319 	/* For LE links the TX power does not change, thus we don't
7320 	 * need to query for it once the value is known.
7321 	 */
7322 	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7323 		     conn->tx_power == HCI_TX_POWER_INVALID))
7324 		err = hci_read_tx_power_sync(hdev, handle, 0x00);
7325 
7326 	/* Max TX power needs to be read only once per connection */
7327 	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7328 		err = hci_read_tx_power_sync(hdev, handle, 0x01);
7329 
7330 	return err;
7331 }
7332 
7333 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7334 			 u16 len)
7335 {
7336 	struct mgmt_cp_get_conn_info *cp = data;
7337 	struct mgmt_rp_get_conn_info rp;
7338 	struct hci_conn *conn;
7339 	unsigned long conn_info_age;
7340 	int err = 0;
7341 
7342 	bt_dev_dbg(hdev, "sock %p", sk);
7343 
7344 	memset(&rp, 0, sizeof(rp));
7345 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7346 	rp.addr.type = cp->addr.type;
7347 
7348 	if (!bdaddr_type_is_valid(cp->addr.type))
7349 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7350 					 MGMT_STATUS_INVALID_PARAMS,
7351 					 &rp, sizeof(rp));
7352 
7353 	hci_dev_lock(hdev);
7354 
7355 	if (!hdev_is_powered(hdev)) {
7356 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7357 					MGMT_STATUS_NOT_POWERED, &rp,
7358 					sizeof(rp));
7359 		goto unlock;
7360 	}
7361 
7362 	if (cp->addr.type == BDADDR_BREDR)
7363 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7364 					       &cp->addr.bdaddr);
7365 	else
7366 		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7367 
7368 	if (!conn || conn->state != BT_CONNECTED) {
7369 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7370 					MGMT_STATUS_NOT_CONNECTED, &rp,
7371 					sizeof(rp));
7372 		goto unlock;
7373 	}
7374 
7375 	/* To keep the client from guessing when to poll again, calculate the
7376 	 * conn info age as a random value between the min/max set in hdev.
7377 	 */
7378 	conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
7379 						 hdev->conn_info_max_age - 1);
7380 
7381 	/* Query controller to refresh cached values if they are too old or were
7382 	 * never read.
7383 	 */
7384 	if (time_after(jiffies, conn->conn_info_timestamp +
7385 		       msecs_to_jiffies(conn_info_age)) ||
7386 	    !conn->conn_info_timestamp) {
7387 		struct mgmt_pending_cmd *cmd;
7388 
7389 		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
7390 				       len);
7391 		if (!cmd) {
7392 			err = -ENOMEM;
7393 		} else {
7394 			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
7395 						 cmd, get_conn_info_complete);
7396 		}
7397 
7398 		if (err < 0) {
7399 			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7400 					  MGMT_STATUS_FAILED, &rp, sizeof(rp));
7401 
7402 			if (cmd)
7403 				mgmt_pending_free(cmd);
7404 
7405 			goto unlock;
7406 		}
7407 
7408 		conn->conn_info_timestamp = jiffies;
7409 	} else {
7410 		/* Cache is valid, just reply with values cached in hci_conn */
7411 		rp.rssi = conn->rssi;
7412 		rp.tx_power = conn->tx_power;
7413 		rp.max_tx_power = conn->max_tx_power;
7414 
7415 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7416 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7417 	}
7418 
7419 unlock:
7420 	hci_dev_unlock(hdev);
7421 	return err;
7422 }
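
/* Cache-age example: with the default conn_info_min_age = 1000 and
 * conn_info_max_age = 3000 (milliseconds, tunable via debugfs), each
 * reply stays valid for a random 1000-2999 ms before the controller is
 * queried again, which keeps pollers from synchronising on a fixed
 * refresh period.
 */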
7423 
7424 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7425 {
7426 	struct mgmt_pending_cmd *cmd = data;
7427 	struct mgmt_cp_get_clock_info *cp = cmd->param;
7428 	struct mgmt_rp_get_clock_info rp;
7429 	struct hci_conn *conn = cmd->user_data;
7430 	u8 status = mgmt_status(err);
7431 
7432 	bt_dev_dbg(hdev, "err %d", err);
7433 
7434 	memset(&rp, 0, sizeof(rp));
7435 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7436 	rp.addr.type = cp->addr.type;
7437 
7438 	if (err)
7439 		goto complete;
7440 
7441 	rp.local_clock = cpu_to_le32(hdev->clock);
7442 
7443 	if (conn) {
7444 		rp.piconet_clock = cpu_to_le32(conn->clock);
7445 		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7446 	}
7447 
7448 complete:
7449 	mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, status, &rp,
7450 			  sizeof(rp));
7451 
7452 	mgmt_pending_free(cmd);
7453 }
7454 
7455 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
7456 {
7457 	struct mgmt_pending_cmd *cmd = data;
7458 	struct mgmt_cp_get_clock_info *cp = cmd->param;
7459 	struct hci_cp_read_clock hci_cp;
7460 	struct hci_conn *conn;
7461 
7462 	memset(&hci_cp, 0, sizeof(hci_cp));
7463 	hci_read_clock_sync(hdev, &hci_cp);
7464 
7465 	/* Make sure connection still exists */
7466 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
7467 	if (!conn || conn->state != BT_CONNECTED)
7468 		return MGMT_STATUS_NOT_CONNECTED;
7469 
7470 	cmd->user_data = conn;
7471 	hci_cp.handle = cpu_to_le16(conn->handle);
7472 	hci_cp.which = 0x01; /* Piconet clock */
7473 
7474 	return hci_read_clock_sync(hdev, &hci_cp);
7475 }
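
/* hci_read_clock_sync() is called twice above: first with a zeroed
 * command (handle 0x0000, which 0x00) to refresh the local clock, then
 * with which = 0x01 to read the piconet clock of the given connection.
 */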
7476 
7477 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7478 								u16 len)
7479 {
7480 	struct mgmt_cp_get_clock_info *cp = data;
7481 	struct mgmt_rp_get_clock_info rp;
7482 	struct mgmt_pending_cmd *cmd;
7483 	struct hci_conn *conn;
7484 	int err;
7485 
7486 	bt_dev_dbg(hdev, "sock %p", sk);
7487 
7488 	memset(&rp, 0, sizeof(rp));
7489 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7490 	rp.addr.type = cp->addr.type;
7491 
7492 	if (cp->addr.type != BDADDR_BREDR)
7493 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7494 					 MGMT_STATUS_INVALID_PARAMS,
7495 					 &rp, sizeof(rp));
7496 
7497 	hci_dev_lock(hdev);
7498 
7499 	if (!hdev_is_powered(hdev)) {
7500 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7501 					MGMT_STATUS_NOT_POWERED, &rp,
7502 					sizeof(rp));
7503 		goto unlock;
7504 	}
7505 
7506 	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7507 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7508 					       &cp->addr.bdaddr);
7509 		if (!conn || conn->state != BT_CONNECTED) {
7510 			err = mgmt_cmd_complete(sk, hdev->id,
7511 						MGMT_OP_GET_CLOCK_INFO,
7512 						MGMT_STATUS_NOT_CONNECTED,
7513 						&rp, sizeof(rp));
7514 			goto unlock;
7515 		}
7516 	} else {
7517 		conn = NULL;
7518 	}
7519 
7520 	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7521 	if (!cmd)
7522 		err = -ENOMEM;
7523 	else
7524 		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
7525 					 get_clock_info_complete);
7526 
7527 	if (err < 0) {
7528 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7529 					MGMT_STATUS_FAILED, &rp, sizeof(rp));
7530 
7531 		if (cmd)
7532 			mgmt_pending_free(cmd);
7533 	}
7534 
7535 
7537 	hci_dev_unlock(hdev);
7538 	return err;
7539 }
7540 
7541 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7542 {
7543 	struct hci_conn *conn;
7544 
7545 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7546 	if (!conn)
7547 		return false;
7548 
7549 	if (conn->dst_type != type)
7550 		return false;
7551 
7552 	if (conn->state != BT_CONNECTED)
7553 		return false;
7554 
7555 	return true;
7556 }
7557 
7558 /* This function requires that the caller holds hdev->lock */
7559 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7560 			       u8 addr_type, u8 auto_connect)
7561 {
7562 	struct hci_conn_params *params;
7563 
7564 	params = hci_conn_params_add(hdev, addr, addr_type);
7565 	if (!params)
7566 		return -EIO;
7567 
7568 	if (params->auto_connect == auto_connect)
7569 		return 0;
7570 
7571 	hci_pend_le_list_del_init(params);
7572 
7573 	switch (auto_connect) {
7574 	case HCI_AUTO_CONN_DISABLED:
7575 	case HCI_AUTO_CONN_LINK_LOSS:
7576 		/* If auto connect is being disabled when we're trying to
7577 		 * connect to the device, keep connecting.
7578 		 */
7579 		if (params->explicit_connect)
7580 			hci_pend_le_list_add(params, &hdev->pend_le_conns);
7581 		break;
7582 	case HCI_AUTO_CONN_REPORT:
7583 		if (params->explicit_connect)
7584 			hci_pend_le_list_add(params, &hdev->pend_le_conns);
7585 		else
7586 			hci_pend_le_list_add(params, &hdev->pend_le_reports);
7587 		break;
7588 	case HCI_AUTO_CONN_DIRECT:
7589 	case HCI_AUTO_CONN_ALWAYS:
7590 		if (!is_connected(hdev, addr, addr_type))
7591 			hci_pend_le_list_add(params, &hdev->pend_le_conns);
7592 		break;
7593 	}
7594 
7595 	params->auto_connect = auto_connect;
7596 
7597 	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7598 		   addr, addr_type, auto_connect);
7599 
7600 	return 0;
7601 }
7602 
7603 static void device_added(struct sock *sk, struct hci_dev *hdev,
7604 			 bdaddr_t *bdaddr, u8 type, u8 action)
7605 {
7606 	struct mgmt_ev_device_added ev;
7607 
7608 	bacpy(&ev.addr.bdaddr, bdaddr);
7609 	ev.addr.type = type;
7610 	ev.action = action;
7611 
7612 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7613 }
7614 
7615 static void add_device_complete(struct hci_dev *hdev, void *data, int err)
7616 {
7617 	struct mgmt_pending_cmd *cmd = data;
7618 	struct mgmt_cp_add_device *cp = cmd->param;
7619 
7620 	if (!err) {
7621 		struct hci_conn_params *params;
7622 
7623 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7624 						le_addr_type(cp->addr.type));
7625 
7626 		device_added(cmd->sk, hdev, &cp->addr.bdaddr, cp->addr.type,
7627 			     cp->action);
7628 		device_flags_changed(NULL, hdev, &cp->addr.bdaddr,
7629 				     cp->addr.type, hdev->conn_flags,
7630 				     params ? params->flags : 0);
7631 	}
7632 
7633 	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_ADD_DEVICE,
7634 			  mgmt_status(err), &cp->addr, sizeof(cp->addr));
7635 	mgmt_pending_free(cmd);
7636 }
7637 
7638 static int add_device_sync(struct hci_dev *hdev, void *data)
7639 {
7640 	return hci_update_passive_scan_sync(hdev);
7641 }
7642 
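/* Add Device: action 0x00 = scan and report (HCI_AUTO_CONN_REPORT),
 * 0x01 = allow incoming connection (BR/EDR) or direct connect (LE),
 * 0x02 = auto-connect whenever seen (HCI_AUTO_CONN_ALWAYS). For BR/EDR
 * only action 0x01 is supported and the address goes on the accept
 * list; for LE the address must be an identity address. A minimal
 * sketch of what userspace sends, assuming it fills the command
 * parameters directly:
 *
 *     cp.addr.type = BDADDR_LE_PUBLIC;
 *     cp.action = 0x02;	// auto-connect whenever seen
 */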
7643 static int add_device(struct sock *sk, struct hci_dev *hdev,
7644 		      void *data, u16 len)
7645 {
7646 	struct mgmt_pending_cmd *cmd;
7647 	struct mgmt_cp_add_device *cp = data;
7648 	u8 auto_conn, addr_type;
7649 	struct hci_conn_params *params;
7650 	int err;
7651 	u32 current_flags = 0;
7652 	u32 supported_flags;
7653 
7654 	bt_dev_dbg(hdev, "sock %p", sk);
7655 
7656 	if (!bdaddr_type_is_valid(cp->addr.type) ||
7657 	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7658 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7659 					 MGMT_STATUS_INVALID_PARAMS,
7660 					 &cp->addr, sizeof(cp->addr));
7661 
7662 	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7663 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7664 					 MGMT_STATUS_INVALID_PARAMS,
7665 					 &cp->addr, sizeof(cp->addr));
7666 
7667 	hci_dev_lock(hdev);
7668 
7669 	if (cp->addr.type == BDADDR_BREDR) {
7670 		/* Only the incoming-connections action (0x01) is supported for now */
7671 		if (cp->action != 0x01) {
7672 			err = mgmt_cmd_complete(sk, hdev->id,
7673 						MGMT_OP_ADD_DEVICE,
7674 						MGMT_STATUS_INVALID_PARAMS,
7675 						&cp->addr, sizeof(cp->addr));
7676 			goto unlock;
7677 		}
7678 
7679 		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
7680 						     &cp->addr.bdaddr,
7681 						     cp->addr.type, 0);
7682 		if (err)
7683 			goto unlock;
7684 
7685 		hci_update_scan(hdev);
7686 
7687 		goto added;
7688 	}
7689 
7690 	addr_type = le_addr_type(cp->addr.type);
7691 
7692 	if (cp->action == 0x02)
7693 		auto_conn = HCI_AUTO_CONN_ALWAYS;
7694 	else if (cp->action == 0x01)
7695 		auto_conn = HCI_AUTO_CONN_DIRECT;
7696 	else
7697 		auto_conn = HCI_AUTO_CONN_REPORT;
7698 
7699 	/* The kernel internally uses conn_params with resolvable private
7700 	 * addresses, but Add Device allows only identity addresses.
7701 	 * Make sure this is enforced before calling
7702 	 * hci_conn_params_lookup.
7703 	 */
7704 	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7705 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7706 					MGMT_STATUS_INVALID_PARAMS,
7707 					&cp->addr, sizeof(cp->addr));
7708 		goto unlock;
7709 	}
7710 
7711 	/* If the connection parameters don't exist for this device,
7712 	 * they will be created and configured with defaults.
7713 	 */
7714 	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7715 				auto_conn) < 0) {
7716 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7717 					MGMT_STATUS_FAILED, &cp->addr,
7718 					sizeof(cp->addr));
7719 		goto unlock;
7720 	} else {
7721 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7722 						addr_type);
7723 		if (params)
7724 			current_flags = params->flags;
7725 	}
7726 
7727 	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
7728 	if (!cmd) {
7729 		err = -ENOMEM;
7730 		goto unlock;
7731 	}
7732 
7733 	err = hci_cmd_sync_queue(hdev, add_device_sync, cmd,
7734 				 add_device_complete);
7735 	if (err < 0) {
7736 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7737 					MGMT_STATUS_FAILED, &cp->addr,
7738 					sizeof(cp->addr));
7739 		mgmt_pending_free(cmd);
7740 	}
7741 
7742 	goto unlock;
7743 
7744 added:
7745 	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7746 	supported_flags = hdev->conn_flags;
7747 	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7748 			     supported_flags, current_flags);
7749 
7750 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7751 				MGMT_STATUS_SUCCESS, &cp->addr,
7752 				sizeof(cp->addr));
7753 
7754 unlock:
7755 	hci_dev_unlock(hdev);
7756 	return err;
7757 }
7758 
7759 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7760 			   bdaddr_t *bdaddr, u8 type)
7761 {
7762 	struct mgmt_ev_device_removed ev;
7763 
7764 	bacpy(&ev.addr.bdaddr, bdaddr);
7765 	ev.addr.type = type;
7766 
7767 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7768 }
7769 
7770 static int remove_device_sync(struct hci_dev *hdev, void *data)
7771 {
7772 	return hci_update_passive_scan_sync(hdev);
7773 }
7774 
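/* Remove Device: a specific address removes a single accept-list entry
 * (BR/EDR) or frees the matching conn_params (LE), while BDADDR_ANY
 * with type 0 acts as a wildcard that clears the whole accept list and
 * every non-disabled LE connection parameter entry.
 */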
7775 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7776 			 void *data, u16 len)
7777 {
7778 	struct mgmt_cp_remove_device *cp = data;
7779 	int err;
7780 
7781 	bt_dev_dbg(hdev, "sock %p", sk);
7782 
7783 	hci_dev_lock(hdev);
7784 
7785 	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7786 		struct hci_conn_params *params;
7787 		u8 addr_type;
7788 
7789 		if (!bdaddr_type_is_valid(cp->addr.type)) {
7790 			err = mgmt_cmd_complete(sk, hdev->id,
7791 						MGMT_OP_REMOVE_DEVICE,
7792 						MGMT_STATUS_INVALID_PARAMS,
7793 						&cp->addr, sizeof(cp->addr));
7794 			goto unlock;
7795 		}
7796 
7797 		if (cp->addr.type == BDADDR_BREDR) {
7798 			err = hci_bdaddr_list_del(&hdev->accept_list,
7799 						  &cp->addr.bdaddr,
7800 						  cp->addr.type);
7801 			if (err) {
7802 				err = mgmt_cmd_complete(sk, hdev->id,
7803 							MGMT_OP_REMOVE_DEVICE,
7804 							MGMT_STATUS_INVALID_PARAMS,
7805 							&cp->addr,
7806 							sizeof(cp->addr));
7807 				goto unlock;
7808 			}
7809 
7810 			hci_update_scan(hdev);
7811 
7812 			device_removed(sk, hdev, &cp->addr.bdaddr,
7813 				       cp->addr.type);
7814 			goto complete;
7815 		}
7816 
7817 		addr_type = le_addr_type(cp->addr.type);
7818 
7819 		/* The kernel internally uses conn_params with resolvable private
7820 		 * addresses, but Remove Device allows only identity addresses.
7821 		 * Make sure this is enforced before calling
7822 		 * hci_conn_params_lookup.
7823 		 */
7824 		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7825 			err = mgmt_cmd_complete(sk, hdev->id,
7826 						MGMT_OP_REMOVE_DEVICE,
7827 						MGMT_STATUS_INVALID_PARAMS,
7828 						&cp->addr, sizeof(cp->addr));
7829 			goto unlock;
7830 		}
7831 
7832 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7833 						addr_type);
7834 		if (!params) {
7835 			err = mgmt_cmd_complete(sk, hdev->id,
7836 						MGMT_OP_REMOVE_DEVICE,
7837 						MGMT_STATUS_INVALID_PARAMS,
7838 						&cp->addr, sizeof(cp->addr));
7839 			goto unlock;
7840 		}
7841 
7842 		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7843 		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7844 			err = mgmt_cmd_complete(sk, hdev->id,
7845 						MGMT_OP_REMOVE_DEVICE,
7846 						MGMT_STATUS_INVALID_PARAMS,
7847 						&cp->addr, sizeof(cp->addr));
7848 			goto unlock;
7849 		}
7850 
7851 		hci_conn_params_free(params);
7852 
7853 		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
7854 	} else {
7855 		struct hci_conn_params *p, *tmp;
7856 		struct bdaddr_list *b, *btmp;
7857 
7858 		if (cp->addr.type) {
7859 			err = mgmt_cmd_complete(sk, hdev->id,
7860 						MGMT_OP_REMOVE_DEVICE,
7861 						MGMT_STATUS_INVALID_PARAMS,
7862 						&cp->addr, sizeof(cp->addr));
7863 			goto unlock;
7864 		}
7865 
7866 		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7867 			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7868 			list_del(&b->list);
7869 			kfree(b);
7870 		}
7871 
7872 		hci_update_scan(hdev);
7873 
7874 		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7875 			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7876 				continue;
7877 			device_removed(sk, hdev, &p->addr, p->addr_type);
7878 			if (p->explicit_connect) {
7879 				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7880 				continue;
7881 			}
7882 			hci_conn_params_free(p);
7883 		}
7884 
7885 		bt_dev_dbg(hdev, "All LE connection parameters were removed");
7886 	}
7887 
7888 	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7889 
7890 complete:
7891 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7892 				MGMT_STATUS_SUCCESS, &cp->addr,
7893 				sizeof(cp->addr));
7894 unlock:
7895 	hci_dev_unlock(hdev);
7896 	return err;
7897 }
7898 
7899 static int conn_update_sync(struct hci_dev *hdev, void *data)
7900 {
7901 	struct hci_conn_params *params = data;
7902 	struct hci_conn *conn;
7903 
7904 	conn = hci_conn_hash_lookup_le(hdev, &params->addr, params->addr_type);
7905 	if (!conn)
7906 		return -ECANCELED;
7907 
7908 	return hci_le_conn_update_sync(hdev, conn, params);
7909 }
7910 
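/* Load Conn Param: the variable-length payload must match exactly,
 * e.g. for param_count == 2 the expected length is
 * sizeof(*cp) + 2 * sizeof(struct mgmt_conn_param), which is what
 * struct_size() computes below. Loading a single entry for a device
 * that already has parameters stored additionally attempts the LE
 * connection update procedure on an existing central connection.
 */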
7911 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7912 			   u16 len)
7913 {
7914 	struct mgmt_cp_load_conn_param *cp = data;
7915 	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7916 				     sizeof(struct mgmt_conn_param));
7917 	u16 param_count, expected_len;
7918 	int i;
7919 
7920 	if (!lmp_le_capable(hdev))
7921 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7922 				       MGMT_STATUS_NOT_SUPPORTED);
7923 
7924 	param_count = __le16_to_cpu(cp->param_count);
7925 	if (param_count > max_param_count) {
7926 		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7927 			   param_count);
7928 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7929 				       MGMT_STATUS_INVALID_PARAMS);
7930 	}
7931 
7932 	expected_len = struct_size(cp, params, param_count);
7933 	if (expected_len != len) {
7934 		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7935 			   expected_len, len);
7936 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7937 				       MGMT_STATUS_INVALID_PARAMS);
7938 	}
7939 
7940 	bt_dev_dbg(hdev, "param_count %u", param_count);
7941 
7942 	hci_dev_lock(hdev);
7943 
7944 	if (param_count > 1)
7945 		hci_conn_params_clear_disabled(hdev);
7946 
7947 	for (i = 0; i < param_count; i++) {
7948 		struct mgmt_conn_param *param = &cp->params[i];
7949 		struct hci_conn_params *hci_param;
7950 		u16 min, max, latency, timeout;
7951 		bool update = false;
7952 		u8 addr_type;
7953 
7954 		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
7955 			   param->addr.type);
7956 
7957 		if (param->addr.type == BDADDR_LE_PUBLIC) {
7958 			addr_type = ADDR_LE_DEV_PUBLIC;
7959 		} else if (param->addr.type == BDADDR_LE_RANDOM) {
7960 			addr_type = ADDR_LE_DEV_RANDOM;
7961 		} else {
7962 			bt_dev_err(hdev, "ignoring invalid connection parameters");
7963 			continue;
7964 		}
7965 
7966 		min = le16_to_cpu(param->min_interval);
7967 		max = le16_to_cpu(param->max_interval);
7968 		latency = le16_to_cpu(param->latency);
7969 		timeout = le16_to_cpu(param->timeout);
7970 
7971 		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7972 			   min, max, latency, timeout);
7973 
7974 		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7975 			bt_dev_err(hdev, "ignoring invalid connection parameters");
7976 			continue;
7977 		}
7978 
7979 		/* Detect when the load is for an existing parameter, then
7980 		 * attempt to trigger the connection update procedure.
7981 		 */
7982 		if (!i && param_count == 1) {
7983 			hci_param = hci_conn_params_lookup(hdev,
7984 							   &param->addr.bdaddr,
7985 							   addr_type);
7986 			if (hci_param)
7987 				update = true;
7988 			else
7989 				hci_conn_params_clear_disabled(hdev);
7990 		}
7991 
7992 		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
7993 						addr_type);
7994 		if (!hci_param) {
7995 			bt_dev_err(hdev, "failed to add connection parameters");
7996 			continue;
7997 		}
7998 
7999 		hci_param->conn_min_interval = min;
8000 		hci_param->conn_max_interval = max;
8001 		hci_param->conn_latency = latency;
8002 		hci_param->supervision_timeout = timeout;
8003 
8004 		/* Check if we need to trigger a connection update */
8005 		if (update) {
8006 			struct hci_conn *conn;
8007 
8008 			/* Look up an existing connection as central and check
8009 			 * whether the parameters match; if they don't, trigger
8010 			 * a connection update.
8011 			 */
8012 			conn = hci_conn_hash_lookup_le(hdev, &hci_param->addr,
8013 						       addr_type);
8014 			if (conn && conn->role == HCI_ROLE_MASTER &&
8015 			    (conn->le_conn_min_interval != min ||
8016 			     conn->le_conn_max_interval != max ||
8017 			     conn->le_conn_latency != latency ||
8018 			     conn->le_supv_timeout != timeout))
8019 				hci_cmd_sync_queue(hdev, conn_update_sync,
8020 						   hci_param, NULL);
8021 		}
8022 	}
8023 
8024 	hci_dev_unlock(hdev);
8025 
8026 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
8027 				 NULL, 0);
8028 }
8029 
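/* Set External Config toggles HCI_EXT_CONFIGURED while the adapter is
 * powered off. If that changes whether the controller counts as
 * configured, the index is removed and re-announced so userspace sees
 * it move between the configured and unconfigured index lists.
 */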
8030 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
8031 			       void *data, u16 len)
8032 {
8033 	struct mgmt_cp_set_external_config *cp = data;
8034 	bool changed;
8035 	int err;
8036 
8037 	bt_dev_dbg(hdev, "sock %p", sk);
8038 
8039 	if (hdev_is_powered(hdev))
8040 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8041 				       MGMT_STATUS_REJECTED);
8042 
8043 	if (cp->config != 0x00 && cp->config != 0x01)
8044 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8045 				       MGMT_STATUS_INVALID_PARAMS);
8046 
8047 	if (!hci_test_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG))
8048 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8049 				       MGMT_STATUS_NOT_SUPPORTED);
8050 
8051 	hci_dev_lock(hdev);
8052 
8053 	if (cp->config)
8054 		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
8055 	else
8056 		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
8057 
8058 	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
8059 	if (err < 0)
8060 		goto unlock;
8061 
8062 	if (!changed)
8063 		goto unlock;
8064 
8065 	err = new_options(hdev, sk);
8066 
8067 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
8068 		mgmt_index_removed(hdev);
8069 
8070 		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
8071 			hci_dev_set_flag(hdev, HCI_CONFIG);
8072 			hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8073 
8074 			queue_work(hdev->req_workqueue, &hdev->power_on);
8075 		} else {
8076 			set_bit(HCI_RAW, &hdev->flags);
8077 			mgmt_index_added(hdev);
8078 		}
8079 	}
8080 
8081 unlock:
8082 	hci_dev_unlock(hdev);
8083 	return err;
8084 }
8085 
8086 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
8087 			      void *data, u16 len)
8088 {
8089 	struct mgmt_cp_set_public_address *cp = data;
8090 	bool changed;
8091 	int err;
8092 
8093 	bt_dev_dbg(hdev, "sock %p", sk);
8094 
8095 	if (hdev_is_powered(hdev))
8096 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8097 				       MGMT_STATUS_REJECTED);
8098 
8099 	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
8100 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8101 				       MGMT_STATUS_INVALID_PARAMS);
8102 
8103 	if (!hdev->set_bdaddr)
8104 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8105 				       MGMT_STATUS_NOT_SUPPORTED);
8106 
8107 	hci_dev_lock(hdev);
8108 
8109 	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
8110 	bacpy(&hdev->public_addr, &cp->bdaddr);
8111 
8112 	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
8113 	if (err < 0)
8114 		goto unlock;
8115 
8116 	if (!changed)
8117 		goto unlock;
8118 
8119 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
8120 		err = new_options(hdev, sk);
8121 
8122 	if (is_configured(hdev)) {
8123 		mgmt_index_removed(hdev);
8124 
8125 		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
8126 
8127 		hci_dev_set_flag(hdev, HCI_CONFIG);
8128 		hci_dev_set_flag(hdev, HCI_AUTO_OFF);
8129 
8130 		queue_work(hdev->req_workqueue, &hdev->power_on);
8131 	}
8132 
8133 unlock:
8134 	hci_dev_unlock(hdev);
8135 	return err;
8136 }
8137 
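/* Each EIR field costs len + type + data, so the class of device takes
 * 5 bytes (1 + 1 + 3) and every 16-byte hash or randomizer takes 18
 * (1 + 1 + 16); hence 5 + 18 + 18 when only one key set is reported
 * and 5 + 18 + 18 + 18 + 18 when both P-192 and P-256 sets are.
 */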
8138 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
8139 					     int err)
8140 {
8141 	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
8142 	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
8143 	u8 *h192, *r192, *h256, *r256;
8144 	struct mgmt_pending_cmd *cmd = data;
8145 	struct sk_buff *skb = cmd->skb;
8146 	u8 status = mgmt_status(err);
8147 	u16 eir_len;
8148 
8149 	if (!status) {
8150 		if (!skb)
8151 			status = MGMT_STATUS_FAILED;
8152 		else if (IS_ERR(skb))
8153 			status = mgmt_status(PTR_ERR(skb));
8154 		else
8155 			status = mgmt_status(skb->data[0]);
8156 	}
8157 
8158 	bt_dev_dbg(hdev, "status %u", status);
8159 
8160 	mgmt_cp = cmd->param;
8161 
8162 	if (status) {
8163 		status = mgmt_status(status);
8164 		eir_len = 0;
8165 
8166 		h192 = NULL;
8167 		r192 = NULL;
8168 		h256 = NULL;
8169 		r256 = NULL;
8170 	} else if (!bredr_sc_enabled(hdev)) {
8171 		struct hci_rp_read_local_oob_data *rp;
8172 
8173 		if (skb->len != sizeof(*rp)) {
8174 			status = MGMT_STATUS_FAILED;
8175 			eir_len = 0;
8176 		} else {
8177 			status = MGMT_STATUS_SUCCESS;
8178 			rp = (void *)skb->data;
8179 
8180 			eir_len = 5 + 18 + 18;
8181 			h192 = rp->hash;
8182 			r192 = rp->rand;
8183 			h256 = NULL;
8184 			r256 = NULL;
8185 		}
8186 	} else {
8187 		struct hci_rp_read_local_oob_ext_data *rp;
8188 
8189 		if (skb->len != sizeof(*rp)) {
8190 			status = MGMT_STATUS_FAILED;
8191 			eir_len = 0;
8192 		} else {
8193 			status = MGMT_STATUS_SUCCESS;
8194 			rp = (void *)skb->data;
8195 
8196 			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
8197 				eir_len = 5 + 18 + 18;
8198 				h192 = NULL;
8199 				r192 = NULL;
8200 			} else {
8201 				eir_len = 5 + 18 + 18 + 18 + 18;
8202 				h192 = rp->hash192;
8203 				r192 = rp->rand192;
8204 			}
8205 
8206 			h256 = rp->hash256;
8207 			r256 = rp->rand256;
8208 		}
8209 	}
8210 
8211 	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
8212 	if (!mgmt_rp)
8213 		goto done;
8214 
8215 	if (eir_len == 0)
8216 		goto send_rsp;
8217 
8218 	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
8219 				  hdev->dev_class, 3);
8220 
8221 	if (h192 && r192) {
8222 		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8223 					  EIR_SSP_HASH_C192, h192, 16);
8224 		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8225 					  EIR_SSP_RAND_R192, r192, 16);
8226 	}
8227 
8228 	if (h256 && r256) {
8229 		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8230 					  EIR_SSP_HASH_C256, h256, 16);
8231 		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
8232 					  EIR_SSP_RAND_R256, r256, 16);
8233 	}
8234 
8235 send_rsp:
8236 	mgmt_rp->type = mgmt_cp->type;
8237 	mgmt_rp->eir_len = cpu_to_le16(eir_len);
8238 
8239 	err = mgmt_cmd_complete(cmd->sk, hdev->id,
8240 				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
8241 				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
8242 	if (err < 0 || status)
8243 		goto done;
8244 
8245 	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
8246 
8247 	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8248 				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
8249 				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
8250 done:
8251 	if (skb && !IS_ERR(skb))
8252 		kfree_skb(skb);
8253 
8254 	kfree(mgmt_rp);
8255 	mgmt_pending_free(cmd);
8256 }
8257 
8258 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8259 				  struct mgmt_cp_read_local_oob_ext_data *cp)
8260 {
8261 	struct mgmt_pending_cmd *cmd;
8262 	int err;
8263 
8264 	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8265 			       cp, sizeof(*cp));
8266 	if (!cmd)
8267 		return -ENOMEM;
8268 
8269 	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8270 				 read_local_oob_ext_data_complete);
8271 
8272 	if (err < 0) {
8273 		mgmt_pending_remove(cmd);
8274 		return err;
8275 	}
8276 
8277 	return 0;
8278 }
8279 
8280 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
8281 				   void *data, u16 data_len)
8282 {
8283 	struct mgmt_cp_read_local_oob_ext_data *cp = data;
8284 	struct mgmt_rp_read_local_oob_ext_data *rp;
8285 	size_t rp_len;
8286 	u16 eir_len;
8287 	u8 status, flags, role, addr[7], hash[16], rand[16];
8288 	int err;
8289 
8290 	bt_dev_dbg(hdev, "sock %p", sk);
8291 
8292 	if (hdev_is_powered(hdev)) {
8293 		switch (cp->type) {
8294 		case BIT(BDADDR_BREDR):
8295 			status = mgmt_bredr_support(hdev);
8296 			if (status)
8297 				eir_len = 0;
8298 			else
8299 				eir_len = 5;
8300 			break;
8301 		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8302 			status = mgmt_le_support(hdev);
8303 			if (status)
8304 				eir_len = 0;
8305 			else
8306 				eir_len = 9 + 3 + 18 + 18 + 3;
8307 			break;
8308 		default:
8309 			status = MGMT_STATUS_INVALID_PARAMS;
8310 			eir_len = 0;
8311 			break;
8312 		}
8313 	} else {
8314 		status = MGMT_STATUS_NOT_POWERED;
8315 		eir_len = 0;
8316 	}
8317 
8318 	rp_len = sizeof(*rp) + eir_len;
8319 	rp = kmalloc(rp_len, GFP_ATOMIC);
8320 	if (!rp)
8321 		return -ENOMEM;
8322 
8323 	if (!status && !lmp_ssp_capable(hdev)) {
8324 		status = MGMT_STATUS_NOT_SUPPORTED;
8325 		eir_len = 0;
8326 	}
8327 
8328 	if (status)
8329 		goto complete;
8330 
8331 	hci_dev_lock(hdev);
8332 
8333 	eir_len = 0;
8334 	switch (cp->type) {
8335 	case BIT(BDADDR_BREDR):
8336 		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8337 			err = read_local_ssp_oob_req(hdev, sk, cp);
8338 			hci_dev_unlock(hdev);
8339 			if (!err)
8340 				goto done;
8341 
8342 			status = MGMT_STATUS_FAILED;
8343 			goto complete;
8344 		} else {
8345 			eir_len = eir_append_data(rp->eir, eir_len,
8346 						  EIR_CLASS_OF_DEV,
8347 						  hdev->dev_class, 3);
8348 		}
8349 		break;
8350 	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8351 		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
8352 		    smp_generate_oob(hdev, hash, rand) < 0) {
8353 			hci_dev_unlock(hdev);
8354 			status = MGMT_STATUS_FAILED;
8355 			goto complete;
8356 		}
8357 
8358 		/* This should return the active RPA, but since the RPA
8359 		 * is only programmed on demand, it is really hard to fill
8360 		 * this in at the moment. For now disallow retrieving
8361 		 * local out-of-band data when privacy is in use.
8362 		 *
8363 		 * Returning the identity address will not help here since
8364 		 * pairing happens before the identity resolving key is
8365 		 * known and thus the connection establishment happens
8366 		 * based on the RPA and not the identity address.
8367 		 */
8368 		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
8369 			hci_dev_unlock(hdev);
8370 			status = MGMT_STATUS_REJECTED;
8371 			goto complete;
8372 		}
8373 
8374 		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
8375 		    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
8376 		    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
8377 		     bacmp(&hdev->static_addr, BDADDR_ANY))) {
8378 			memcpy(addr, &hdev->static_addr, 6);
8379 			addr[6] = 0x01;
8380 		} else {
8381 			memcpy(addr, &hdev->bdaddr, 6);
8382 			addr[6] = 0x00;
8383 		}
8384 
8385 		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
8386 					  addr, sizeof(addr));
8387 
8388 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8389 			role = 0x02;
8390 		else
8391 			role = 0x01;
8392 
8393 		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
8394 					  &role, sizeof(role));
8395 
8396 		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
8397 			eir_len = eir_append_data(rp->eir, eir_len,
8398 						  EIR_LE_SC_CONFIRM,
8399 						  hash, sizeof(hash));
8400 
8401 			eir_len = eir_append_data(rp->eir, eir_len,
8402 						  EIR_LE_SC_RANDOM,
8403 						  rand, sizeof(rand));
8404 		}
8405 
8406 		flags = mgmt_get_adv_discov_flags(hdev);
8407 
8408 		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
8409 			flags |= LE_AD_NO_BREDR;
8410 
8411 		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
8412 					  &flags, sizeof(flags));
8413 		break;
8414 	}
8415 
8416 	hci_dev_unlock(hdev);
8417 
8418 	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
8419 
8420 	status = MGMT_STATUS_SUCCESS;
8421 
8422 complete:
8423 	rp->type = cp->type;
8424 	rp->eir_len = cpu_to_le16(eir_len);
8425 
8426 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
8427 				status, rp, sizeof(*rp) + eir_len);
8428 	if (err < 0 || status)
8429 		goto done;
8430 
8431 	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8432 				 rp, sizeof(*rp) + eir_len,
8433 				 HCI_MGMT_OOB_DATA_EVENTS, sk);
8434 
8435 done:
8436 	kfree(rp);
8437 
8438 	return err;
8439 }
8440 
8441 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8442 {
8443 	u32 flags = 0;
8444 
8445 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
8446 	flags |= MGMT_ADV_FLAG_DISCOV;
8447 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8448 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8449 	flags |= MGMT_ADV_FLAG_APPEARANCE;
8450 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8451 	flags |= MGMT_ADV_PARAM_DURATION;
8452 	flags |= MGMT_ADV_PARAM_TIMEOUT;
8453 	flags |= MGMT_ADV_PARAM_INTERVALS;
8454 	flags |= MGMT_ADV_PARAM_TX_POWER;
8455 	flags |= MGMT_ADV_PARAM_SCAN_RSP;
8456 
8457 	/* With extended advertising, the TX_POWER returned from Set Adv Param
8458 	 * will always be valid.
8459 	 */
8460 	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8461 		flags |= MGMT_ADV_FLAG_TX_POWER;
8462 
8463 	if (ext_adv_capable(hdev)) {
8464 		flags |= MGMT_ADV_FLAG_SEC_1M;
8465 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8466 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8467 
8468 		if (le_2m_capable(hdev))
8469 			flags |= MGMT_ADV_FLAG_SEC_2M;
8470 
8471 		if (le_coded_capable(hdev))
8472 			flags |= MGMT_ADV_FLAG_SEC_CODED;
8473 	}
8474 
8475 	return flags;
8476 }
8477 
8478 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
8479 			     void *data, u16 data_len)
8480 {
8481 	struct mgmt_rp_read_adv_features *rp;
8482 	size_t rp_len;
8483 	int err;
8484 	struct adv_info *adv_instance;
8485 	u32 supported_flags;
8486 	u8 *instance;
8487 
8488 	bt_dev_dbg(hdev, "sock %p", sk);
8489 
8490 	if (!lmp_le_capable(hdev))
8491 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8492 				       MGMT_STATUS_REJECTED);
8493 
8494 	hci_dev_lock(hdev);
8495 
8496 	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
8497 	rp = kmalloc(rp_len, GFP_ATOMIC);
8498 	if (!rp) {
8499 		hci_dev_unlock(hdev);
8500 		return -ENOMEM;
8501 	}
8502 
8503 	supported_flags = get_supported_adv_flags(hdev);
8504 
8505 	rp->supported_flags = cpu_to_le32(supported_flags);
8506 	rp->max_adv_data_len = max_adv_len(hdev);
8507 	rp->max_scan_rsp_len = max_adv_len(hdev);
8508 	rp->max_instances = hdev->le_num_of_adv_sets;
8509 	rp->num_instances = hdev->adv_instance_cnt;
8510 
8511 	instance = rp->instance;
8512 	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
8513 		/* Only instances 1-le_num_of_adv_sets are externally visible */
8514 		if (adv_instance->instance <= hdev->adv_instance_cnt) {
8515 			*instance = adv_instance->instance;
8516 			instance++;
8517 		} else {
8518 			rp->num_instances--;
8519 			rp_len--;
8520 		}
8521 	}
8522 
8523 	hci_dev_unlock(hdev);
8524 
8525 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8526 				MGMT_STATUS_SUCCESS, rp, rp_len);
8527 
8528 	kfree(rp);
8529 
8530 	return err;
8531 }
8532 
8533 static u8 calculate_name_len(struct hci_dev *hdev)
8534 {
8535 	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
8536 
8537 	return eir_append_local_name(hdev, buf, 0);
8538 }
8539 
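/* Space reserved for kernel-managed fields is subtracted from the
 * advertising payload: the flags field and the TX power field cost 3
 * bytes each, an appearance field 4. Assuming a legacy controller
 * where max_adv_len() is 31, managed flags plus TX power leave
 * 31 - 3 - 3 = 25 bytes for caller-supplied advertising data.
 */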
8540 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8541 			   bool is_adv_data)
8542 {
8543 	u8 max_len = max_adv_len(hdev);
8544 
8545 	if (is_adv_data) {
8546 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8547 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
8548 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
8549 			max_len -= 3;
8550 
8551 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8552 			max_len -= 3;
8553 	} else {
8554 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8555 			max_len -= calculate_name_len(hdev);
8556 
8557 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8558 			max_len -= 4;
8559 	}
8560 
8561 	return max_len;
8562 }
8563 
8564 static bool flags_managed(u32 adv_flags)
8565 {
8566 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8567 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
8568 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
8569 }
8570 
8571 static bool tx_power_managed(u32 adv_flags)
8572 {
8573 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8574 }
8575 
8576 static bool name_managed(u32 adv_flags)
8577 {
8578 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8579 }
8580 
8581 static bool appearance_managed(u32 adv_flags)
8582 {
8583 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8584 }
8585 
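/* The advertising data is a sequence of length-prefixed fields:
 * data[i] is the field length (covering type + payload, not itself)
 * and data[i + 1] the field type, e.g. 02 01 06 is a 2-byte flags
 * field. Fields whose type the kernel manages itself are rejected.
 */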
8586 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8587 			      u8 len, bool is_adv_data)
8588 {
8589 	int i, cur_len;
8590 	u8 max_len;
8591 
8592 	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8593 
8594 	if (len > max_len)
8595 		return false;
8596 
8597 	/* Make sure that the data is correctly formatted. */
8598 	for (i = 0; i < len; i += (cur_len + 1)) {
8599 		cur_len = data[i];
8600 
8601 		if (!cur_len)
8602 			continue;
8603 
8604 		if (data[i + 1] == EIR_FLAGS &&
8605 		    (!is_adv_data || flags_managed(adv_flags)))
8606 			return false;
8607 
8608 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8609 			return false;
8610 
8611 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8612 			return false;
8613 
8614 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8615 			return false;
8616 
8617 		if (data[i + 1] == EIR_APPEARANCE &&
8618 		    appearance_managed(adv_flags))
8619 			return false;
8620 
8621 		/* If the current field length would exceed the total data
8622 		 * length, then it's invalid.
8623 		 */
8624 		if (i + cur_len >= len)
8625 			return false;
8626 	}
8627 
8628 	return true;
8629 }
8630 
8631 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8632 {
8633 	u32 supported_flags, phy_flags;
8634 
8635 	/* The current implementation only supports a subset of the specified
8636 	 * flags. Also need to check mutual exclusiveness of sec flags.
8637 	 */
8638 	supported_flags = get_supported_adv_flags(hdev);
8639 	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
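	/* phy_flags & -phy_flags isolates the lowest set bit, so the XOR
	 * below is non-zero exactly when more than one MGMT_ADV_FLAG_SEC_*
	 * bit was requested, e.g. 0b110 ^ 0b010 = 0b100.
	 */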
8640 	if (adv_flags & ~supported_flags ||
8641 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8642 		return false;
8643 
8644 	return true;
8645 }
8646 
8647 static bool adv_busy(struct hci_dev *hdev)
8648 {
8649 	return pending_find(MGMT_OP_SET_LE, hdev);
8650 }
8651 
8652 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8653 			     int err)
8654 {
8655 	struct adv_info *adv, *n;
8656 
8657 	bt_dev_dbg(hdev, "err %d", err);
8658 
8659 	hci_dev_lock(hdev);
8660 
8661 	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8662 		u8 instance;
8663 
8664 		if (!adv->pending)
8665 			continue;
8666 
8667 		if (!err) {
8668 			adv->pending = false;
8669 			continue;
8670 		}
8671 
8672 		instance = adv->instance;
8673 
8674 		if (hdev->cur_adv_instance == instance)
8675 			cancel_adv_timeout(hdev);
8676 
8677 		hci_remove_adv_instance(hdev, instance);
8678 		mgmt_advertising_removed(sk, hdev, instance);
8679 	}
8680 
8681 	hci_dev_unlock(hdev);
8682 }
8683 
8684 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8685 {
8686 	struct mgmt_pending_cmd *cmd = data;
8687 	struct mgmt_cp_add_advertising *cp = cmd->param;
8688 	struct mgmt_rp_add_advertising rp;
8689 
8690 	memset(&rp, 0, sizeof(rp));
8691 
8692 	rp.instance = cp->instance;
8693 
8694 	if (err)
8695 		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
8696 				mgmt_status(err));
8697 	else
8698 		mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
8699 				  mgmt_status(err), &rp, sizeof(rp));
8700 
8701 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8702 
8703 	mgmt_pending_free(cmd);
8704 }
8705 
8706 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8707 {
8708 	struct mgmt_pending_cmd *cmd = data;
8709 	struct mgmt_cp_add_advertising *cp = cmd->param;
8710 
8711 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8712 }
8713 
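/* Add Advertising: registers (or overwrites) instance data and decides
 * what to schedule next. Replacing the instance currently on air
 * cancels its timeout and moves on to the next instance; otherwise the
 * new instance is advertised immediately when no rotation is running.
 */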
8714 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8715 			   void *data, u16 data_len)
8716 {
8717 	struct mgmt_cp_add_advertising *cp = data;
8718 	struct mgmt_rp_add_advertising rp;
8719 	u32 flags;
8720 	u8 status;
8721 	u16 timeout, duration;
8722 	unsigned int prev_instance_cnt;
8723 	u8 schedule_instance = 0;
8724 	struct adv_info *adv, *next_instance;
8725 	int err;
8726 	struct mgmt_pending_cmd *cmd;
8727 
8728 	bt_dev_dbg(hdev, "sock %p", sk);
8729 
8730 	status = mgmt_le_support(hdev);
8731 	if (status)
8732 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8733 				       status);
8734 
8735 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8736 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8737 				       MGMT_STATUS_INVALID_PARAMS);
8738 
8739 	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8740 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8741 				       MGMT_STATUS_INVALID_PARAMS);
8742 
8743 	flags = __le32_to_cpu(cp->flags);
8744 	timeout = __le16_to_cpu(cp->timeout);
8745 	duration = __le16_to_cpu(cp->duration);
8746 
8747 	if (!requested_adv_flags_are_valid(hdev, flags))
8748 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8749 				       MGMT_STATUS_INVALID_PARAMS);
8750 
8751 	hci_dev_lock(hdev);
8752 
8753 	if (timeout && !hdev_is_powered(hdev)) {
8754 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8755 				      MGMT_STATUS_REJECTED);
8756 		goto unlock;
8757 	}
8758 
8759 	if (adv_busy(hdev)) {
8760 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8761 				      MGMT_STATUS_BUSY);
8762 		goto unlock;
8763 	}
8764 
8765 	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8766 	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8767 			       cp->scan_rsp_len, false)) {
8768 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8769 				      MGMT_STATUS_INVALID_PARAMS);
8770 		goto unlock;
8771 	}
8772 
8773 	prev_instance_cnt = hdev->adv_instance_cnt;
8774 
8775 	adv = hci_add_adv_instance(hdev, cp->instance, flags,
8776 				   cp->adv_data_len, cp->data,
8777 				   cp->scan_rsp_len,
8778 				   cp->data + cp->adv_data_len,
8779 				   timeout, duration,
8780 				   HCI_ADV_TX_POWER_NO_PREFERENCE,
8781 				   hdev->le_adv_min_interval,
8782 				   hdev->le_adv_max_interval, 0);
8783 	if (IS_ERR(adv)) {
8784 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8785 				      MGMT_STATUS_FAILED);
8786 		goto unlock;
8787 	}
8788 
8789 	/* Only trigger an advertising added event if a new instance was
8790 	 * actually added.
8791 	 */
8792 	if (hdev->adv_instance_cnt > prev_instance_cnt)
8793 		mgmt_advertising_added(sk, hdev, cp->instance);
8794 
8795 	if (hdev->cur_adv_instance == cp->instance) {
8796 		/* If the currently advertised instance is being changed then
8797 		 * cancel the current advertising and schedule the next
8798 		 * instance. If there is only one instance then the overridden
8799 		 * advertising data will be visible right away.
8800 		 */
8801 		cancel_adv_timeout(hdev);
8802 
8803 		next_instance = hci_get_next_instance(hdev, cp->instance);
8804 		if (next_instance)
8805 			schedule_instance = next_instance->instance;
8806 	} else if (!hdev->adv_instance_timeout) {
8807 		/* Immediately advertise the new instance if no other
8808 		 * instance is currently being advertised.
8809 		 */
8810 		schedule_instance = cp->instance;
8811 	}
8812 
8813 	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
8814 	 * there is no instance to be advertised then we have no HCI
8815 	 * communication to make. Simply return.
8816 	 */
8817 	if (!hdev_is_powered(hdev) ||
8818 	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8819 	    !schedule_instance) {
8820 		rp.instance = cp->instance;
8821 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8822 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8823 		goto unlock;
8824 	}
8825 
8826 	/* We're good to go, update advertising data, parameters, and start
8827 	 * advertising.
8828 	 */
8829 	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
8830 			       data_len);
8831 	if (!cmd) {
8832 		err = -ENOMEM;
8833 		goto unlock;
8834 	}
8835 
8836 	cp->instance = schedule_instance;
8837 
8838 	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
8839 				 add_advertising_complete);
8840 	if (err < 0)
8841 		mgmt_pending_free(cmd);
8842 
8843 unlock:
8844 	hci_dev_unlock(hdev);
8845 
8846 	return err;
8847 }
8848 
8849 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8850 					int err)
8851 {
8852 	struct mgmt_pending_cmd *cmd = data;
8853 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8854 	struct mgmt_rp_add_ext_adv_params rp;
8855 	struct adv_info *adv;
8856 	u32 flags;
8857 
8858 	BT_DBG("%s", hdev->name);
8859 
8860 	hci_dev_lock(hdev);
8861 
8862 	adv = hci_find_adv_instance(hdev, cp->instance);
8863 	if (!adv)
8864 		goto unlock;
8865 
8866 	rp.instance = cp->instance;
8867 	rp.tx_power = adv->tx_power;
8868 
8869 	/* While we're at it, inform userspace of the available space for this
8870 	 * advertisement, given the flags that will be used.
8871 	 */
8872 	flags = __le32_to_cpu(cp->flags);
8873 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8874 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8875 
8876 	if (err) {
8877 		/* If this advertisement was previously advertising and we
8878 		 * failed to update it, we signal that it has been removed and
8879 		 * delete its structure.
8880 		 */
8881 		if (!adv->pending)
8882 			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8883 
8884 		hci_remove_adv_instance(hdev, cp->instance);
8885 
8886 		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
8887 				mgmt_status(err));
8888 	} else {
8889 		mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
8890 				  mgmt_status(err), &rp, sizeof(rp));
8891 	}
8892 
8893 unlock:
8894 	mgmt_pending_free(cmd);
8895 
8896 	hci_dev_unlock(hdev);
8897 }
8898 
8899 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8900 {
8901 	struct mgmt_pending_cmd *cmd = data;
8902 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8903 
8904 	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8905 }
8906 
8907 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8908 			      void *data, u16 data_len)
8909 {
8910 	struct mgmt_cp_add_ext_adv_params *cp = data;
8911 	struct mgmt_rp_add_ext_adv_params rp;
8912 	struct mgmt_pending_cmd *cmd = NULL;
8913 	struct adv_info *adv;
8914 	u32 flags, min_interval, max_interval;
8915 	u16 timeout, duration;
8916 	u8 status;
8917 	s8 tx_power;
8918 	int err;
8919 
8920 	BT_DBG("%s", hdev->name);
8921 
8922 	status = mgmt_le_support(hdev);
8923 	if (status)
8924 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8925 				       status);
8926 
8927 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8928 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8929 				       MGMT_STATUS_INVALID_PARAMS);
8930 
8931 	/* Add Advertising was split into separate params and data MGMT calls
8932 	 * so that more parameters can be added to this structure in the
8933 	 * future. We therefore verify only the bare minimum structure known
8934 	 * when the interface was defined; any extra parameters we don't know
8935 	 * about are ignored in this request.
8936 	 */
8937 	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8938 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8939 				       MGMT_STATUS_INVALID_PARAMS);
8940 
8941 	flags = __le32_to_cpu(cp->flags);
8942 
8943 	if (!requested_adv_flags_are_valid(hdev, flags))
8944 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8945 				       MGMT_STATUS_INVALID_PARAMS);
8946 
8947 	hci_dev_lock(hdev);
8948 
8949 	/* In the new interface the adapter must be powered to register */
8950 	if (!hdev_is_powered(hdev)) {
8951 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8952 				      MGMT_STATUS_REJECTED);
8953 		goto unlock;
8954 	}
8955 
8956 	if (adv_busy(hdev)) {
8957 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8958 				      MGMT_STATUS_BUSY);
8959 		goto unlock;
8960 	}
8961 
8962 	/* Parse defined parameters from request, use defaults otherwise */
8963 	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8964 		  __le16_to_cpu(cp->timeout) : 0;
8965 
8966 	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8967 		   __le16_to_cpu(cp->duration) :
8968 		   hdev->def_multi_adv_rotation_duration;
8969 
8970 	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8971 		       __le32_to_cpu(cp->min_interval) :
8972 		       hdev->le_adv_min_interval;
8973 
8974 	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8975 		       __le32_to_cpu(cp->max_interval) :
8976 		       hdev->le_adv_max_interval;
8977 
8978 	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8979 		   cp->tx_power :
8980 		   HCI_ADV_TX_POWER_NO_PREFERENCE;
8981 
8982 	/* Create advertising instance with no advertising or response data */
8983 	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
8984 				   timeout, duration, tx_power, min_interval,
8985 				   max_interval, 0);
8986 
8987 	if (IS_ERR(adv)) {
8988 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8989 				      MGMT_STATUS_FAILED);
8990 		goto unlock;
8991 	}
8992 
8993 	/* Submit request for advertising params if ext adv available */
8994 	if (ext_adv_capable(hdev)) {
8995 		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
8996 				       data, data_len);
8997 		if (!cmd) {
8998 			err = -ENOMEM;
8999 			hci_remove_adv_instance(hdev, cp->instance);
9000 			goto unlock;
9001 		}
9002 
9003 		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
9004 					 add_ext_adv_params_complete);
9005 		if (err < 0)
9006 			mgmt_pending_free(cmd);
9007 	} else {
9008 		rp.instance = cp->instance;
9009 		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
9010 		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9011 		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9012 		err = mgmt_cmd_complete(sk, hdev->id,
9013 					MGMT_OP_ADD_EXT_ADV_PARAMS,
9014 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9015 	}
9016 
9017 unlock:
9018 	hci_dev_unlock(hdev);
9019 
9020 	return err;
9021 }
9022 
9023 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
9024 {
9025 	struct mgmt_pending_cmd *cmd = data;
9026 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
9027 	struct mgmt_rp_add_advertising rp;
9028 
9029 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
9030 
9031 	memset(&rp, 0, sizeof(rp));
9032 
9033 	rp.instance = cp->instance;
9034 
9035 	if (err)
9036 		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
9037 				mgmt_status(err));
9038 	else
9039 		mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
9040 				  mgmt_status(err), &rp, sizeof(rp));
9041 
9042 	mgmt_pending_free(cmd);
9043 }
9044 
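/* With extended advertising the controller keeps per-set data, so the
 * sync step programs the advertising data and scan response for the
 * set and then enables it; legacy controllers instead fall back to the
 * software instance scheduler.
 */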
9045 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
9046 {
9047 	struct mgmt_pending_cmd *cmd = data;
9048 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
9049 	int err;
9050 
9051 	if (ext_adv_capable(hdev)) {
9052 		err = hci_update_adv_data_sync(hdev, cp->instance);
9053 		if (err)
9054 			return err;
9055 
9056 		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
9057 		if (err)
9058 			return err;
9059 
9060 		return hci_enable_ext_advertising_sync(hdev, cp->instance);
9061 	}
9062 
9063 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
9064 }
9065 
9066 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
9067 			    u16 data_len)
9068 {
9069 	struct mgmt_cp_add_ext_adv_data *cp = data;
9070 	struct mgmt_rp_add_ext_adv_data rp;
9071 	u8 schedule_instance = 0;
9072 	struct adv_info *next_instance;
9073 	struct adv_info *adv_instance;
9074 	int err = 0;
9075 	struct mgmt_pending_cmd *cmd;
9076 
9077 	BT_DBG("%s", hdev->name);
9078 
9079 	hci_dev_lock(hdev);
9080 
9081 	adv_instance = hci_find_adv_instance(hdev, cp->instance);
9082 
9083 	if (!adv_instance) {
9084 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9085 				      MGMT_STATUS_INVALID_PARAMS);
9086 		goto unlock;
9087 	}
9088 
9089 	/* In the new interface the adapter must be powered to register */
9090 	if (!hdev_is_powered(hdev)) {
9091 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9092 				      MGMT_STATUS_REJECTED);
9093 		goto clear_new_instance;
9094 	}
9095 
9096 	if (adv_busy(hdev)) {
9097 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9098 				      MGMT_STATUS_BUSY);
9099 		goto clear_new_instance;
9100 	}
9101 
9102 	/* Validate new data */
9103 	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
9104 			       cp->adv_data_len, true) ||
9105 	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
9106 			       cp->adv_data_len, cp->scan_rsp_len, false)) {
9107 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9108 				      MGMT_STATUS_INVALID_PARAMS);
9109 		goto clear_new_instance;
9110 	}
9111 
9112 	/* Set the data in the advertising instance */
9113 	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
9114 				  cp->data, cp->scan_rsp_len,
9115 				  cp->data + cp->adv_data_len);
9116 
9117 	/* If using software rotation, determine next instance to use */
9118 	if (hdev->cur_adv_instance == cp->instance) {
9119 		/* If the currently advertised instance is being changed
9120 		 * then cancel the current advertising and schedule the
9121 		 * next instance. If there is only one instance then the
9122 		 * overridden advertising data will be visible right
9123 		 * away.
9124 		 */
9125 		cancel_adv_timeout(hdev);
9126 
9127 		next_instance = hci_get_next_instance(hdev, cp->instance);
9128 		if (next_instance)
9129 			schedule_instance = next_instance->instance;
9130 	} else if (!hdev->adv_instance_timeout) {
9131 		/* Immediately advertise the new instance if no other
9132 		 * instance is currently being advertised.
9133 		 */
9134 		schedule_instance = cp->instance;
9135 	}
9136 
9137 	/* If the HCI_ADVERTISING flag is set or there is no instance to
9138 	 * be advertised then we have no HCI communication to make.
9139 	 * Simply return.
9140 	 */
9141 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
9142 		if (adv_instance->pending) {
9143 			mgmt_advertising_added(sk, hdev, cp->instance);
9144 			adv_instance->pending = false;
9145 		}
9146 		rp.instance = cp->instance;
9147 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
9148 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9149 		goto unlock;
9150 	}
9151 
9152 	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
9153 			       data_len);
9154 	if (!cmd) {
9155 		err = -ENOMEM;
9156 		goto clear_new_instance;
9157 	}
9158 
9159 	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
9160 				 add_ext_adv_data_complete);
9161 	if (err < 0) {
9162 		mgmt_pending_free(cmd);
9163 		goto clear_new_instance;
9164 	}
9165 
9166 	/* We were successful in updating the data, so trigger the
9167 	 * advertising_added event if this instance wasn't previously
9168 	 * advertising. If a failure occurs in the requests we initiated, the
9169 	 * instance is removed again in add_ext_adv_data_complete().
9170 	 */
9171 	if (adv_instance->pending)
9172 		mgmt_advertising_added(sk, hdev, cp->instance);
9173 
9174 	goto unlock;
9175 
9176 clear_new_instance:
9177 	hci_remove_adv_instance(hdev, cp->instance);
9178 
9179 unlock:
9180 	hci_dev_unlock(hdev);
9181 
9182 	return err;
9183 }
9184 
9185 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9186 					int err)
9187 {
9188 	struct mgmt_pending_cmd *cmd = data;
9189 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9190 	struct mgmt_rp_remove_advertising rp;
9191 
9192 	bt_dev_dbg(hdev, "err %d", err);
9193 
9194 	memset(&rp, 0, sizeof(rp));
9195 	rp.instance = cp->instance;
9196 
9197 	if (err)
9198 		mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode,
9199 				mgmt_status(err));
9200 	else
9201 		mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode,
9202 				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9203 
9204 	mgmt_pending_free(cmd);
9205 }
9206 
9207 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9208 {
9209 	struct mgmt_pending_cmd *cmd = data;
9210 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9211 	int err;
9212 
9213 	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9214 	if (err)
9215 		return err;
9216 
9217 	if (list_empty(&hdev->adv_instances))
9218 		err = hci_disable_advertising_sync(hdev);
9219 
9220 	return err;
9221 }
9222 
9223 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
9224 			      void *data, u16 data_len)
9225 {
9226 	struct mgmt_cp_remove_advertising *cp = data;
9227 	struct mgmt_pending_cmd *cmd;
9228 	int err;
9229 
9230 	bt_dev_dbg(hdev, "sock %p", sk);
9231 
9232 	hci_dev_lock(hdev);
9233 
9234 	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
9235 		err = mgmt_cmd_status(sk, hdev->id,
9236 				      MGMT_OP_REMOVE_ADVERTISING,
9237 				      MGMT_STATUS_INVALID_PARAMS);
9238 		goto unlock;
9239 	}
9240 
9241 	if (pending_find(MGMT_OP_SET_LE, hdev)) {
9242 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9243 				      MGMT_STATUS_BUSY);
9244 		goto unlock;
9245 	}
9246 
9247 	if (list_empty(&hdev->adv_instances)) {
9248 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9249 				      MGMT_STATUS_INVALID_PARAMS);
9250 		goto unlock;
9251 	}
9252 
9253 	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
9254 			       data_len);
9255 	if (!cmd) {
9256 		err = -ENOMEM;
9257 		goto unlock;
9258 	}
9259 
9260 	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
9261 				 remove_advertising_complete);
9262 	if (err < 0)
9263 		mgmt_pending_free(cmd);
9264 
9265 unlock:
9266 	hci_dev_unlock(hdev);
9267 
9268 	return err;
9269 }
9270 
9271 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9272 			     void *data, u16 data_len)
9273 {
9274 	struct mgmt_cp_get_adv_size_info *cp = data;
9275 	struct mgmt_rp_get_adv_size_info rp;
9276 	u32 flags, supported_flags;
9277 
9278 	bt_dev_dbg(hdev, "sock %p", sk);
9279 
9280 	if (!lmp_le_capable(hdev))
9281 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9282 				       MGMT_STATUS_REJECTED);
9283 
9284 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9285 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9286 				       MGMT_STATUS_INVALID_PARAMS);
9287 
9288 	flags = __le32_to_cpu(cp->flags);
9289 
9290 	/* The current implementation only supports a subset of the specified
9291 	 * flags.
9292 	 */
9293 	supported_flags = get_supported_adv_flags(hdev);
9294 	if (flags & ~supported_flags)
9295 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9296 				       MGMT_STATUS_INVALID_PARAMS);
9297 
9298 	rp.instance = cp->instance;
9299 	rp.flags = cp->flags;
9300 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9301 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9302 
9303 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9304 				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9305 }
9306 
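/* Handler table indexed by management opcode. The optional flags
 * change how a request is dispatched: HCI_MGMT_VAR_LEN accepts
 * payloads longer than the stated minimum, HCI_MGMT_NO_HDEV marks
 * commands that take no controller index, HCI_MGMT_UNTRUSTED permits
 * untrusted sockets, and HCI_MGMT_UNCONFIGURED allows the command on
 * a not yet configured controller.
 */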
9307 static const struct hci_mgmt_handler mgmt_handlers[] = {
9308 	{ NULL }, /* 0x0000 (no command) */
9309 	{ read_version,            MGMT_READ_VERSION_SIZE,
9310 						HCI_MGMT_NO_HDEV |
9311 						HCI_MGMT_UNTRUSTED },
9312 	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
9313 						HCI_MGMT_NO_HDEV |
9314 						HCI_MGMT_UNTRUSTED },
9315 	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
9316 						HCI_MGMT_NO_HDEV |
9317 						HCI_MGMT_UNTRUSTED },
9318 	{ read_controller_info,    MGMT_READ_INFO_SIZE,
9319 						HCI_MGMT_UNTRUSTED },
9320 	{ set_powered,             MGMT_SETTING_SIZE },
9321 	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
9322 	{ set_connectable,         MGMT_SETTING_SIZE },
9323 	{ set_fast_connectable,    MGMT_SETTING_SIZE },
9324 	{ set_bondable,            MGMT_SETTING_SIZE },
9325 	{ set_link_security,       MGMT_SETTING_SIZE },
9326 	{ set_ssp,                 MGMT_SETTING_SIZE },
9327 	{ set_hs,                  MGMT_SETTING_SIZE },
9328 	{ set_le,                  MGMT_SETTING_SIZE },
9329 	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
9330 	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
9331 	{ add_uuid,                MGMT_ADD_UUID_SIZE },
9332 	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
9333 	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
9334 						HCI_MGMT_VAR_LEN },
9335 	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
9336 						HCI_MGMT_VAR_LEN },
9337 	{ disconnect,              MGMT_DISCONNECT_SIZE },
9338 	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
9339 	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
9340 	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
9341 	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
9342 	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
9343 	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
9344 	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
9345 	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
9346 	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
9347 	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
9348 	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
9349 	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
9350 	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
9351 						HCI_MGMT_VAR_LEN },
9352 	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
9353 	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
9354 	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
9355 	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
9356 	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
9357 	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
9358 	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
9359 	{ set_advertising,         MGMT_SETTING_SIZE },
9360 	{ set_bredr,               MGMT_SETTING_SIZE },
9361 	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
9362 	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
9363 	{ set_secure_conn,         MGMT_SETTING_SIZE },
9364 	{ set_debug_keys,          MGMT_SETTING_SIZE },
9365 	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
9366 	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
9367 						HCI_MGMT_VAR_LEN },
9368 	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
9369 	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
9370 	{ add_device,              MGMT_ADD_DEVICE_SIZE },
9371 	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
9372 	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
9373 						HCI_MGMT_VAR_LEN },
9374 	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
9375 						HCI_MGMT_NO_HDEV |
9376 						HCI_MGMT_UNTRUSTED },
9377 	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
9378 						HCI_MGMT_UNCONFIGURED |
9379 						HCI_MGMT_UNTRUSTED },
9380 	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
9381 						HCI_MGMT_UNCONFIGURED },
9382 	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
9383 						HCI_MGMT_UNCONFIGURED },
9384 	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
9385 						HCI_MGMT_VAR_LEN },
9386 	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
9387 	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
9388 						HCI_MGMT_NO_HDEV |
9389 						HCI_MGMT_UNTRUSTED },
9390 	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
9391 	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
9392 						HCI_MGMT_VAR_LEN },
9393 	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
9394 	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
9395 	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
9396 	{ read_ext_controller_info, MGMT_READ_EXT_INFO_SIZE,
9397 						HCI_MGMT_UNTRUSTED },
9398 	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
9399 	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
9400 	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
9401 	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
9402 						HCI_MGMT_VAR_LEN },
9403 	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
9404 	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
9405 						HCI_MGMT_UNTRUSTED },
9406 	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
9407 						HCI_MGMT_UNTRUSTED |
9408 						HCI_MGMT_HDEV_OPTIONAL },
9409 	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
9410 						HCI_MGMT_VAR_LEN |
9411 						HCI_MGMT_HDEV_OPTIONAL },
9412 	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
9413 						HCI_MGMT_UNTRUSTED },
9414 	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
9415 						HCI_MGMT_VAR_LEN },
9416 	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
9417 						HCI_MGMT_UNTRUSTED },
9418 	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
9419 						HCI_MGMT_VAR_LEN },
9420 	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
9421 	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
9422 	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
9423 	{ add_adv_patterns_monitor, MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
9424 						HCI_MGMT_VAR_LEN },
9425 	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
9426 	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
9427 						HCI_MGMT_VAR_LEN },
9428 	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
9429 						HCI_MGMT_VAR_LEN },
9430 	{ add_adv_patterns_monitor_rssi,
9431 				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
9432 						HCI_MGMT_VAR_LEN },
9433 	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
9434 						HCI_MGMT_VAR_LEN },
9435 	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
9436 	{ mesh_send,               MGMT_MESH_SEND_SIZE,
9437 						HCI_MGMT_VAR_LEN },
9438 	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
9439 	{ mgmt_hci_cmd_sync,       MGMT_HCI_CMD_SYNC_SIZE, HCI_MGMT_VAR_LEN },
9440 };
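/*
 * Editor's note: each entry above pairs a handler with the fixed (or, for
 * HCI_MGMT_VAR_LEN entries, minimum) parameter size that the control
 * channel validates before dispatch. As a hedged illustration of how this
 * table is reached from userspace (hypothetical snippet, not part of this
 * file; error handling omitted, htole16() from <endian.h> assumed), a
 * client sends MGMT_OP_READ_INDEX_LIST, which carries no parameters and,
 * matching its HCI_MGMT_NO_HDEV flag, targets no controller index:
 *
 *	struct sockaddr_hci addr = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = HCI_DEV_NONE,
 *		.hci_channel = HCI_CHANNEL_CONTROL,
 *	};
 *	struct mgmt_hdr hdr = {
 *		.opcode = htole16(MGMT_OP_READ_INDEX_LIST),
 *		.index  = htole16(MGMT_INDEX_NONE),
 *		.len    = htole16(0),
 *	};
 *	int fd = socket(PF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	send(fd, &hdr, sizeof(hdr), 0);
 *
 * The reply arrives on the same socket as an MGMT_EV_CMD_COMPLETE event
 * carrying the controller index list.
 */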
9441 
9442 void mgmt_index_added(struct hci_dev *hdev)
9443 {
9444 	struct mgmt_ev_ext_index ev;
9445 
9446 	if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE))
9447 		return;
9448 
9449 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9450 		mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
9451 				 HCI_MGMT_UNCONF_INDEX_EVENTS);
9452 		ev.type = 0x01;
9453 	} else {
9454 		mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9455 				 HCI_MGMT_INDEX_EVENTS);
9456 		ev.type = 0x00;
9457 	}
9458 
9459 	ev.bus = hdev->bus;
9460 
9461 	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9462 			 HCI_MGMT_EXT_INDEX_EVENTS);
9463 }
9464 
9465 void mgmt_index_removed(struct hci_dev *hdev)
9466 {
9467 	struct mgmt_ev_ext_index ev;
9468 	struct cmd_lookup match = { NULL, hdev, MGMT_STATUS_INVALID_INDEX };
9469 
9470 	if (hci_test_quirk(hdev, HCI_QUIRK_RAW_DEVICE))
9471 		return;
9472 
9473 	mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match);
9474 
9475 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9476 		mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
9477 				 HCI_MGMT_UNCONF_INDEX_EVENTS);
9478 		ev.type = 0x01;
9479 	} else {
9480 		mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
9481 				 HCI_MGMT_INDEX_EVENTS);
9482 		ev.type = 0x00;
9483 	}
9484 
9485 	ev.bus = hdev->bus;
9486 
9487 	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
9488 			 HCI_MGMT_EXT_INDEX_EVENTS);
9489 
9490 	/* Cancel any remaining timed work */
9491 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
9492 		return;
9493 	cancel_delayed_work_sync(&hdev->discov_off);
9494 	cancel_delayed_work_sync(&hdev->service_cache);
9495 	cancel_delayed_work_sync(&hdev->rpa_expired);
9496 }
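/*
 * Editor's note: both paths above first emit the legacy
 * (UNCONF_)INDEX_ADDED/REMOVED event for older clients and then a single
 * extended event whose two-byte payload distinguishes the cases. A hedged
 * sketch of how a consumer of MGMT_EV_EXT_INDEX_* might read it (the
 * helper name is hypothetical; the field meanings come from the code
 * above):
 *
 *	static const char *ext_index_type(const struct mgmt_ev_ext_index *ev)
 *	{
 *		return ev->type == 0x01 ? "unconfigured" : "configured";
 *	}
 *
 * with ev->bus carrying the transport identifier taken from hdev->bus.
 */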
9497 
9498 void mgmt_power_on(struct hci_dev *hdev, int err)
9499 {
9500 	struct cmd_lookup match = { NULL, hdev };
9501 
9502 	bt_dev_dbg(hdev, "err %d", err);
9503 
9504 	hci_dev_lock(hdev);
9505 
9506 	if (!err) {
9507 		restart_le_actions(hdev);
9508 		hci_update_passive_scan(hdev);
9509 	}
9510 
9511 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp,
9512 			     &match);
9513 
9514 	new_settings(hdev, match.sk);
9515 
9516 	if (match.sk)
9517 		sock_put(match.sk);
9518 
9519 	hci_dev_unlock(hdev);
9520 }
9521 
9522 void __mgmt_power_off(struct hci_dev *hdev)
9523 {
9524 	struct cmd_lookup match = { NULL, hdev };
9525 	u8 zero_cod[] = { 0, 0, 0 };
9526 
9527 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, true, settings_rsp,
9528 			     &match);
9529 
9530 	/* If the power off is because of hdev unregistration, use
9531 	 * the appropriate INVALID_INDEX status. Otherwise use
9532 	 * NOT_POWERED. We cover both scenarios here since later in
9533 	 * mgmt_index_removed() any hci_conn callbacks will have already
9534 	 * been triggered, potentially causing misleading DISCONNECTED
9535 	 * status responses.
9536 	 */
9537 	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
9538 		match.mgmt_status = MGMT_STATUS_INVALID_INDEX;
9539 	else
9540 		match.mgmt_status = MGMT_STATUS_NOT_POWERED;
9541 
9542 	mgmt_pending_foreach(0, hdev, true, cmd_complete_rsp, &match);
9543 
9544 	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
9545 		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
9546 				   zero_cod, sizeof(zero_cod),
9547 				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9548 		ext_info_changed(hdev, NULL);
9549 	}
9550 
9551 	new_settings(hdev, match.sk);
9552 
9553 	if (match.sk)
9554 		sock_put(match.sk);
9555 }
9556 
9557 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9558 {
9559 	struct mgmt_pending_cmd *cmd;
9560 	u8 status;
9561 
9562 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9563 	if (!cmd)
9564 		return;
9565 
9566 	if (err == -ERFKILL)
9567 		status = MGMT_STATUS_RFKILLED;
9568 	else
9569 		status = MGMT_STATUS_FAILED;
9570 
9571 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9572 
9573 	mgmt_pending_remove(cmd);
9574 }
9575 
9576 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
9577 		       bool persistent)
9578 {
9579 	struct mgmt_ev_new_link_key ev;
9580 
9581 	memset(&ev, 0, sizeof(ev));
9582 
9583 	ev.store_hint = persistent;
9584 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9585 	ev.key.addr.type = BDADDR_BREDR;
9586 	ev.key.type = key->type;
9587 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
9588 	ev.key.pin_len = key->pin_len;
9589 
9590 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
9591 }
9592 
9593 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9594 {
9595 	switch (ltk->type) {
9596 	case SMP_LTK:
9597 	case SMP_LTK_RESPONDER:
9598 		if (ltk->authenticated)
9599 			return MGMT_LTK_AUTHENTICATED;
9600 		return MGMT_LTK_UNAUTHENTICATED;
9601 	case SMP_LTK_P256:
9602 		if (ltk->authenticated)
9603 			return MGMT_LTK_P256_AUTH;
9604 		return MGMT_LTK_P256_UNAUTH;
9605 	case SMP_LTK_P256_DEBUG:
9606 		return MGMT_LTK_P256_DEBUG;
9607 	}
9608 
9609 	return MGMT_LTK_UNAUTHENTICATED;
9610 }
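/*
 * Editor's note: summarising the mapping above, SMP_LTK and
 * SMP_LTK_RESPONDER become MGMT_LTK_AUTHENTICATED or
 * MGMT_LTK_UNAUTHENTICATED depending on ltk->authenticated, the P256
 * variants map to their MGMT_LTK_P256_* counterparts, and debug keys are
 * always reported as MGMT_LTK_P256_DEBUG. Unknown types conservatively
 * fall back to MGMT_LTK_UNAUTHENTICATED.
 */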
9611 
9612 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
9613 {
9614 	struct mgmt_ev_new_long_term_key ev;
9615 
9616 	memset(&ev, 0, sizeof(ev));
9617 
9618 	/* Devices using resolvable or non-resolvable random addresses
9619 	 * without providing an identity resolving key don't need their
9620 	 * long term keys stored. Their addresses will change the next
9621 	 * time around.
9622 	 *
9623 	 * Only when a remote device provides an identity address
9624 	 * should the long term key be stored. If the remote identity
9625 	 * is known, the long term keys are internally mapped to the
9626 	 * identity address. So allow static random and public
9627 	 * addresses here.
9628 	 */
9629 	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9630 	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
9631 		ev.store_hint = 0x00;
9632 	else
9633 		ev.store_hint = persistent;
9634 
9635 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9636 	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
9637 	ev.key.type = mgmt_ltk_type(key);
9638 	ev.key.enc_size = key->enc_size;
9639 	ev.key.ediv = key->ediv;
9640 	ev.key.rand = key->rand;
9641 
9642 	if (key->type == SMP_LTK)
9643 		ev.key.initiator = 1;
9644 
9645 	/* Make sure we copy only the significant bytes based on the
9646 	 * encryption key size, and set the rest of the value to zeroes.
9647 	 */
9648 	memcpy(ev.key.val, key->val, key->enc_size);
9649 	memset(ev.key.val + key->enc_size, 0,
9650 	       sizeof(ev.key.val) - key->enc_size);
9651 
9652 	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
9653 }
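/*
 * Editor's note: the (key->bdaddr.b[5] & 0xc0) != 0xc0 test above checks
 * the two most significant bits of the LE random address (bdaddr is
 * stored little-endian, so b[5] is the most significant byte). Per the
 * Core specification, 0b11 marks a static random address; resolvable
 * (0b01) and non-resolvable (0b00) private addresses fail the test and
 * get store_hint = 0x00. A hedged worked example with made-up addresses:
 *
 *	C4:xx:xx:xx:xx:xx  ->  0xc4 & 0xc0 == 0xc0  ->  static, storable
 *	4A:xx:xx:xx:xx:xx  ->  0x4a & 0xc0 == 0x40  ->  RPA, do not store
 */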
9654 
9655 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9656 {
9657 	struct mgmt_ev_new_irk ev;
9658 
9659 	memset(&ev, 0, sizeof(ev));
9660 
9661 	ev.store_hint = persistent;
9662 
9663 	bacpy(&ev.rpa, &irk->rpa);
9664 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9665 	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
9666 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9667 
9668 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9669 }
9670 
9671 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9672 		   bool persistent)
9673 {
9674 	struct mgmt_ev_new_csrk ev;
9675 
9676 	memset(&ev, 0, sizeof(ev));
9677 
9678 	/* Devices using resolvable or non-resolvable random addresses
9679 	 * without providing an identity resolving key don't need their
9680 	 * signature resolving keys stored. Their addresses will change
9681 	 * the next time around.
9682 	 *
9683 	 * Only when a remote device provides an identity address
9684 	 * should the signature resolving key be stored. So allow
9685 	 * static random and public addresses here.
9686 	 */
9687 	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9688 	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9689 		ev.store_hint = 0x00;
9690 	else
9691 		ev.store_hint = persistent;
9692 
9693 	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9694 	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
9695 	ev.key.type = csrk->type;
9696 	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9697 
9698 	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
9699 }
9700 
9701 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9702 			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9703 			 u16 max_interval, u16 latency, u16 timeout)
9704 {
9705 	struct mgmt_ev_new_conn_param ev;
9706 
9707 	if (!hci_is_identity_address(bdaddr, bdaddr_type))
9708 		return;
9709 
9710 	memset(&ev, 0, sizeof(ev));
9711 	bacpy(&ev.addr.bdaddr, bdaddr);
9712 	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9713 	ev.store_hint = store_hint;
9714 	ev.min_interval = cpu_to_le16(min_interval);
9715 	ev.max_interval = cpu_to_le16(max_interval);
9716 	ev.latency = cpu_to_le16(latency);
9717 	ev.timeout = cpu_to_le16(timeout);
9718 
9719 	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9720 }
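/*
 * Editor's note: the interval, latency and timeout fields above are
 * forwarded in the raw units used at the HCI level, which (per the Core
 * specification) are 1.25 ms per interval unit, connection events for
 * latency, and 10 ms per timeout unit. A hedged example: min_interval =
 * 0x0018 (24 * 1.25 ms = 30 ms), latency = 0 and timeout = 0x02a0
 * (672 * 10 ms = 6.72 s) would describe a 30 ms connection interval with
 * a 6.72 s supervision timeout.
 */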
9721 
9722 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9723 			   u8 *name, u8 name_len)
9724 {
9725 	struct sk_buff *skb;
9726 	struct mgmt_ev_device_connected *ev;
9727 	u16 eir_len = 0;
9728 	u32 flags = 0;
9729 
9730 	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
9731 		return;
9732 
9733 	/* Allocate the buffer for LE advertising data or BR/EDR EIR */
9734 	if (conn->le_adv_data_len > 0)
9735 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9736 				     sizeof(*ev) + conn->le_adv_data_len);
9737 	else
9738 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9739 				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
9740 				     eir_precalc_len(sizeof(conn->dev_class)));
9741 
9742 	if (!skb)
9743 		return;
9744 
9745 	ev = skb_put(skb, sizeof(*ev));
9746 	bacpy(&ev->addr.bdaddr, &conn->dst);
9747 	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9748 
9749 	if (conn->out)
9750 		flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9751 
9752 	ev->flags = __cpu_to_le32(flags);
9753 
9754 	/* We must ensure that the EIR Data fields are ordered and
9755 	 * unique. Keep it simple for now and avoid the problem by not
9756 	 * adding any BR/EDR data to the LE adv.
9757 	 */
9758 	if (conn->le_adv_data_len > 0) {
9759 		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9760 		eir_len = conn->le_adv_data_len;
9761 	} else {
9762 		if (name)
9763 			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9764 
9765 		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
9766 			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
9767 						    conn->dev_class, sizeof(conn->dev_class));
9768 	}
9769 
9770 	ev->eir_len = cpu_to_le16(eir_len);
9771 
9772 	mgmt_event_skb(skb, NULL);
9773 }
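/*
 * Editor's note: the EIR block appended above is a sequence of
 * length/type/value fields, where the length octet covers the type octet
 * plus the data. A hedged example of the complete-name field that
 * eir_skb_put_data() would produce for a device called "BlueZ":
 *
 *	06 09 42 6c 75 65 5a
 *	|  |  'B 'l 'u 'e 'Z
 *	|  EIR_NAME_COMPLETE (0x09)
 *	field length = 1 (type) + 5 (name) = 6
 */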
9774 
9775 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9776 {
9777 	struct hci_dev *hdev = data;
9778 	struct mgmt_cp_unpair_device *cp = cmd->param;
9779 
9780 	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9781 
9782 	cmd->cmd_complete(cmd, 0);
9783 }
9784 
9785 bool mgmt_powering_down(struct hci_dev *hdev)
9786 {
9787 	struct mgmt_pending_cmd *cmd;
9788 	struct mgmt_mode *cp;
9789 
9790 	if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
9791 		return true;
9792 
9793 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9794 	if (!cmd)
9795 		return false;
9796 
9797 	cp = cmd->param;
9798 	if (!cp->val)
9799 		return true;
9800 
9801 	return false;
9802 }
9803 
9804 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
9805 			      u8 link_type, u8 addr_type, u8 reason,
9806 			      bool mgmt_connected)
9807 {
9808 	struct mgmt_ev_device_disconnected ev;
9809 	struct sock *sk = NULL;
9810 
9811 	if (!mgmt_connected)
9812 		return;
9813 
9814 	if (link_type != ACL_LINK &&
9815 	    link_type != LE_LINK  &&
9816 	    link_type != BIS_LINK)
9817 		return;
9818 
9819 	bacpy(&ev.addr.bdaddr, bdaddr);
9820 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9821 	ev.reason = reason;
9822 
9823 	/* Report disconnects due to suspend */
9824 	if (hdev->suspended)
9825 		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
9826 
9827 	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
9828 
9829 	if (sk)
9830 		sock_put(sk);
9831 }
9832 
9833 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9834 			    u8 link_type, u8 addr_type, u8 status)
9835 {
9836 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9837 	struct mgmt_cp_disconnect *cp;
9838 	struct mgmt_pending_cmd *cmd;
9839 
9840 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, true,
9841 			     unpair_device_rsp, hdev);
9842 
9843 	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9844 	if (!cmd)
9845 		return;
9846 
9847 	cp = cmd->param;
9848 
9849 	if (bacmp(bdaddr, &cp->addr.bdaddr))
9850 		return;
9851 
9852 	if (cp->addr.type != bdaddr_type)
9853 		return;
9854 
9855 	cmd->cmd_complete(cmd, mgmt_status(status));
9856 	mgmt_pending_remove(cmd);
9857 }
9858 
9859 void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status)
9860 {
9861 	struct mgmt_ev_connect_failed ev;
9862 
9863 	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
9864 		mgmt_device_disconnected(hdev, &conn->dst, conn->type,
9865 					 conn->dst_type, status, true);
9866 		return;
9867 	}
9868 
9869 	bacpy(&ev.addr.bdaddr, &conn->dst);
9870 	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9871 	ev.status = mgmt_status(status);
9872 
9873 	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
9874 }
9875 
9876 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9877 {
9878 	struct mgmt_ev_pin_code_request ev;
9879 
9880 	bacpy(&ev.addr.bdaddr, bdaddr);
9881 	ev.addr.type = BDADDR_BREDR;
9882 	ev.secure = secure;
9883 
9884 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9885 }
9886 
9887 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9888 				  u8 status)
9889 {
9890 	struct mgmt_pending_cmd *cmd;
9891 
9892 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9893 	if (!cmd)
9894 		return;
9895 
9896 	cmd->cmd_complete(cmd, mgmt_status(status));
9897 	mgmt_pending_remove(cmd);
9898 }
9899 
9900 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9901 				      u8 status)
9902 {
9903 	struct mgmt_pending_cmd *cmd;
9904 
9905 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9906 	if (!cmd)
9907 		return;
9908 
9909 	cmd->cmd_complete(cmd, mgmt_status(status));
9910 	mgmt_pending_remove(cmd);
9911 }
9912 
9913 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9914 			      u8 link_type, u8 addr_type, u32 value,
9915 			      u8 confirm_hint)
9916 {
9917 	struct mgmt_ev_user_confirm_request ev;
9918 
9919 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9920 
9921 	bacpy(&ev.addr.bdaddr, bdaddr);
9922 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9923 	ev.confirm_hint = confirm_hint;
9924 	ev.value = cpu_to_le32(value);
9925 
9926 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9927 			  NULL);
9928 }
9929 
9930 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9931 			      u8 link_type, u8 addr_type)
9932 {
9933 	struct mgmt_ev_user_passkey_request ev;
9934 
9935 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9936 
9937 	bacpy(&ev.addr.bdaddr, bdaddr);
9938 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9939 
9940 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9941 			  NULL);
9942 }
9943 
9944 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9945 				      u8 link_type, u8 addr_type, u8 status,
9946 				      u8 opcode)
9947 {
9948 	struct mgmt_pending_cmd *cmd;
9949 
9950 	cmd = pending_find(opcode, hdev);
9951 	if (!cmd)
9952 		return -ENOENT;
9953 
9954 	cmd->cmd_complete(cmd, mgmt_status(status));
9955 	mgmt_pending_remove(cmd);
9956 
9957 	return 0;
9958 }
9959 
9960 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9961 				     u8 link_type, u8 addr_type, u8 status)
9962 {
9963 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9964 					  status, MGMT_OP_USER_CONFIRM_REPLY);
9965 }
9966 
9967 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9968 					 u8 link_type, u8 addr_type, u8 status)
9969 {
9970 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9971 					  status,
9972 					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
9973 }
9974 
9975 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9976 				     u8 link_type, u8 addr_type, u8 status)
9977 {
9978 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9979 					  status, MGMT_OP_USER_PASSKEY_REPLY);
9980 }
9981 
9982 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9983 					 u8 link_type, u8 addr_type, u8 status)
9984 {
9985 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9986 					  status,
9987 					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
9988 }
9989 
9990 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9991 			     u8 link_type, u8 addr_type, u32 passkey,
9992 			     u8 entered)
9993 {
9994 	struct mgmt_ev_passkey_notify ev;
9995 
9996 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9997 
9998 	bacpy(&ev.addr.bdaddr, bdaddr);
9999 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
10000 	ev.passkey = __cpu_to_le32(passkey);
10001 	ev.entered = entered;
10002 
10003 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
10004 }
10005 
10006 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
10007 {
10008 	struct mgmt_ev_auth_failed ev;
10009 	struct mgmt_pending_cmd *cmd;
10010 	u8 status = mgmt_status(hci_status);
10011 
10012 	bacpy(&ev.addr.bdaddr, &conn->dst);
10013 	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
10014 	ev.status = status;
10015 
10016 	cmd = find_pairing(conn);
10017 
10018 	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
10019 		    cmd ? cmd->sk : NULL);
10020 
10021 	if (cmd) {
10022 		cmd->cmd_complete(cmd, status);
10023 		mgmt_pending_remove(cmd);
10024 	}
10025 }
10026 
10027 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
10028 {
10029 	struct cmd_lookup match = { NULL, hdev };
10030 	bool changed;
10031 
10032 	if (status) {
10033 		u8 mgmt_err = mgmt_status(status);
10034 		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true,
10035 				     cmd_status_rsp, &mgmt_err);
10036 		return;
10037 	}
10038 
10039 	if (test_bit(HCI_AUTH, &hdev->flags))
10040 		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
10041 	else
10042 		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
10043 
10044 	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, true,
10045 			     settings_rsp, &match);
10046 
10047 	if (changed)
10048 		new_settings(hdev, match.sk);
10049 
10050 	if (match.sk)
10051 		sock_put(match.sk);
10052 }
10053 
10054 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
10055 {
10056 	struct cmd_lookup *match = data;
10057 
10058 	if (match->sk == NULL) {
10059 		match->sk = cmd->sk;
10060 		sock_hold(match->sk);
10061 	}
10062 }
10063 
10064 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
10065 				    u8 status)
10066 {
10067 	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
10068 
10069 	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, false, sk_lookup,
10070 			     &match);
10071 	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, false, sk_lookup,
10072 			     &match);
10073 	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, false, sk_lookup,
10074 			     &match);
10075 
10076 	if (!status) {
10077 		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
10078 				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
10079 		ext_info_changed(hdev, NULL);
10080 	}
10081 
10082 	if (match.sk)
10083 		sock_put(match.sk);
10084 }
10085 
10086 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
10087 {
10088 	struct mgmt_cp_set_local_name ev;
10089 	struct mgmt_pending_cmd *cmd;
10090 
10091 	if (status)
10092 		return;
10093 
10094 	memset(&ev, 0, sizeof(ev));
10095 	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
10096 	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
10097 
10098 	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
10099 	if (!cmd) {
10100 		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
10101 
10102 		/* If this name change happened while powering the HCI
10103 		 * dev on or off, don't send any mgmt signals.
10104 		 */
10105 		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
10106 			return;
10107 
10108 		if (pending_find(MGMT_OP_SET_POWERED, hdev))
10109 			return;
10110 	}
10111 
10112 	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
10113 			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
10114 	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
10115 }
10116 
10117 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
10118 {
10119 	int i;
10120 
10121 	for (i = 0; i < uuid_count; i++) {
10122 		if (!memcmp(uuid, uuids[i], 16))
10123 			return true;
10124 	}
10125 
10126 	return false;
10127 }
10128 
10129 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
10130 {
10131 	u16 parsed = 0;
10132 
10133 	while (parsed < eir_len) {
10134 		u8 field_len = eir[0];
10135 		u8 uuid[16];
10136 		int i;
10137 
10138 		if (field_len == 0)
10139 			break;
10140 
10141 		if (eir_len - parsed < field_len + 1)
10142 			break;
10143 
10144 		switch (eir[1]) {
10145 		case EIR_UUID16_ALL:
10146 		case EIR_UUID16_SOME:
10147 			for (i = 0; i + 3 <= field_len; i += 2) {
10148 				memcpy(uuid, bluetooth_base_uuid, 16);
10149 				uuid[13] = eir[i + 3];
10150 				uuid[12] = eir[i + 2];
10151 				if (has_uuid(uuid, uuid_count, uuids))
10152 					return true;
10153 			}
10154 			break;
10155 		case EIR_UUID32_ALL:
10156 		case EIR_UUID32_SOME:
10157 			for (i = 0; i + 5 <= field_len; i += 4) {
10158 				memcpy(uuid, bluetooth_base_uuid, 16);
10159 				uuid[15] = eir[i + 5];
10160 				uuid[14] = eir[i + 4];
10161 				uuid[13] = eir[i + 3];
10162 				uuid[12] = eir[i + 2];
10163 				if (has_uuid(uuid, uuid_count, uuids))
10164 					return true;
10165 			}
10166 			break;
10167 		case EIR_UUID128_ALL:
10168 		case EIR_UUID128_SOME:
10169 			for (i = 0; i + 17 <= field_len; i += 16) {
10170 				memcpy(uuid, eir + i + 2, 16);
10171 				if (has_uuid(uuid, uuid_count, uuids))
10172 					return true;
10173 			}
10174 			break;
10175 		}
10176 
10177 		parsed += field_len + 1;
10178 		eir += field_len + 1;
10179 	}
10180 
10181 	return false;
10182 }
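/*
 * Editor's note: the 16- and 32-bit branches above widen short UUIDs into
 * the 128-bit Bluetooth base UUID (00000000-0000-1000-8000-00805F9B34FB)
 * before comparing; bytes 12-13 (and 14-15 for 32-bit values) of the
 * little-endian array hold the short value. A hedged example: the 16-bit
 * Audio Sink UUID 0x110B, carried little-endian in EIR as "0b 11",
 * expands to 0000110B-0000-1000-8000-00805F9B34FB.
 */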
10183 
10184 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
10185 			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
10186 {
10187 	/* If an RSSI threshold has been specified, and
10188 	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
10189 	 * an RSSI smaller than the threshold will be dropped. If the quirk
10190 	 * is set, let it through for further processing, as we might need to
10191 	 * restart the scan.
10192 	 *
10193 	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
10194 	 * the results are also dropped.
10195 	 */
10196 	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
10197 	    (rssi == HCI_RSSI_INVALID ||
10198 	    (rssi < hdev->discovery.rssi &&
10199 	     !hci_test_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER))))
10200 		return false;
10201 
10202 	if (hdev->discovery.uuid_count != 0) {
10203 		/* If a list of UUIDs is provided in filter, results with no
10204 		 * matching UUID should be dropped.
10205 		 */
10206 		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
10207 				   hdev->discovery.uuids) &&
10208 		    !eir_has_uuids(scan_rsp, scan_rsp_len,
10209 				   hdev->discovery.uuid_count,
10210 				   hdev->discovery.uuids))
10211 			return false;
10212 	}
10213 
10214 	/* If duplicate filtering does not report RSSI changes, then restart
10215 	 * scanning to ensure updated result with updated RSSI values.
10216 	 */
10217 	if (hci_test_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER)) {
10218 		/* Validate RSSI value against the RSSI threshold once more. */
10219 		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
10220 		    rssi < hdev->discovery.rssi)
10221 			return false;
10222 	}
10223 
10224 	return true;
10225 }
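/*
 * Editor's note: condensed, the filter above drops a result when an RSSI
 * threshold is set and the RSSI is missing or (without the strict-filter
 * quirk) below the threshold; drops it when a UUID list is set and
 * neither the EIR nor the scan response matches any requested UUID; and,
 * with the quirk set, re-checks the threshold at the end so low-RSSI
 * results still do not reach userspace.
 */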
10226 
10227 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
10228 				  bdaddr_t *bdaddr, u8 addr_type)
10229 {
10230 	struct mgmt_ev_adv_monitor_device_lost ev;
10231 
10232 	ev.monitor_handle = cpu_to_le16(handle);
10233 	bacpy(&ev.addr.bdaddr, bdaddr);
10234 	ev.addr.type = addr_type;
10235 
10236 	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
10237 		   NULL);
10238 }
10239 
10240 static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
10241 					       struct sk_buff *skb,
10242 					       struct sock *skip_sk,
10243 					       u16 handle)
10244 {
10245 	struct sk_buff *advmon_skb;
10246 	size_t advmon_skb_len;
10247 	__le16 *monitor_handle;
10248 
10249 	if (!skb)
10250 		return;
10251 
10252 	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
10253 			  sizeof(struct mgmt_ev_device_found)) + skb->len;
10254 	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
10255 				    advmon_skb_len);
10256 	if (!advmon_skb)
10257 		return;
10258 
10259 	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
10260 	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
10261 	 * store monitor_handle of the matched monitor.
10262 	 */
10263 	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
10264 	*monitor_handle = cpu_to_le16(handle);
10265 	skb_put_data(advmon_skb, skb->data, skb->len);
10266 
10267 	mgmt_event_skb(advmon_skb, skip_sk);
10268 }
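/*
 * Editor's note: the resulting event is therefore the original
 * MGMT_EV_DEVICE_FOUND payload with a little-endian monitor handle
 * prepended, matching struct mgmt_ev_adv_monitor_device_found. A handle
 * of 0 (used by the caller below) means the report is not attributed to
 * one specific monitor.
 */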
10269 
10270 static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
10271 					  bdaddr_t *bdaddr, bool report_device,
10272 					  struct sk_buff *skb,
10273 					  struct sock *skip_sk)
10274 {
10275 	struct monitored_device *dev, *tmp;
10276 	bool matched = false;
10277 	bool notified = false;
10278 
10279 	/* We have received the Advertisement Report because:
10280 	 * 1. the kernel has initiated active discovery
10281 	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
10282 	 *    passive scanning
10283 	 * 3. if none of the above is true, we have one or more active
10284 	 *    Advertisement Monitors
10285 	 *
10286 	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
10287 	 * and report ONLY one advertisement per device for the matched Monitor
10288 	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
10289 	 *
10290 	 * For case 3, since we are not active scanning and all advertisements
10291 	 * received are due to a matched Advertisement Monitor, report all
10292 	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
10293 	 */
10294 	if (report_device && !hdev->advmon_pend_notify) {
10295 		mgmt_event_skb(skb, skip_sk);
10296 		return;
10297 	}
10298 
10299 	hdev->advmon_pend_notify = false;
10300 
10301 	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
10302 		if (!bacmp(&dev->bdaddr, bdaddr)) {
10303 			matched = true;
10304 
10305 			if (!dev->notified) {
10306 				mgmt_send_adv_monitor_device_found(hdev, skb,
10307 								   skip_sk,
10308 								   dev->handle);
10309 				notified = true;
10310 				dev->notified = true;
10311 			}
10312 		}
10313 
10314 		if (!dev->notified)
10315 			hdev->advmon_pend_notify = true;
10316 	}
10317 
10318 	if (!report_device &&
10319 	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
10320 		/* Handle 0 indicates that we are not active scanning and this
10321 		 * is a subsequent advertisement report for an already matched
10322 		 * Advertisement Monitor or the controller offloading support
10323 		 * is not available.
10324 		 */
10325 		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
10326 	}
10327 
10328 	if (report_device)
10329 		mgmt_event_skb(skb, skip_sk);
10330 	else
10331 		kfree_skb(skb);
10332 }
10333 
10334 static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
10335 			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
10336 			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
10337 			      u64 instant)
10338 {
10339 	struct sk_buff *skb;
10340 	struct mgmt_ev_mesh_device_found *ev;
10341 	int i, j;
10342 
10343 	if (!hdev->mesh_ad_types[0])
10344 		goto accepted;
10345 
10346 	/* Scan for requested AD types */
10347 	if (eir_len > 0) {
10348 		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
10349 			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
10350 				if (!hdev->mesh_ad_types[j])
10351 					break;
10352 
10353 				if (hdev->mesh_ad_types[j] == eir[i + 1])
10354 					goto accepted;
10355 			}
10356 		}
10357 	}
10358 
10359 	if (scan_rsp_len > 0) {
10360 		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
10361 			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
10362 				if (!hdev->mesh_ad_types[j])
10363 					break;
10364 
10365 				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
10366 					goto accepted;
10367 			}
10368 		}
10369 	}
10370 
10371 	return;
10372 
10373 accepted:
10374 	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
10375 			     sizeof(*ev) + eir_len + scan_rsp_len);
10376 	if (!skb)
10377 		return;
10378 
10379 	ev = skb_put(skb, sizeof(*ev));
10380 
10381 	bacpy(&ev->addr.bdaddr, bdaddr);
10382 	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
10383 	ev->rssi = rssi;
10384 	ev->flags = cpu_to_le32(flags);
10385 	ev->instant = cpu_to_le64(instant);
10386 
10387 	if (eir_len > 0)
10388 		/* Copy EIR or advertising data into event */
10389 		skb_put_data(skb, eir, eir_len);
10390 
10391 	if (scan_rsp_len > 0)
10392 		/* Append scan response data to event */
10393 		skb_put_data(skb, scan_rsp, scan_rsp_len);
10394 
10395 	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
10396 
10397 	mgmt_event_skb(skb, NULL);
10398 }
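/*
 * Editor's note: hdev->mesh_ad_types acts as a zero-terminated allow-list
 * of AD types configured via MGMT_OP_SET_MESH_RECEIVER; only reports
 * containing at least one listed type are forwarded above. A hedged
 * example: a mesh receiver might list the assigned AD types 0x29
 * (PB-ADV), 0x2a (Mesh Message) and 0x2b (Mesh Beacon), so ordinary
 * non-mesh advertisements are filtered out here.
 */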
10399 
10400 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10401 		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
10402 		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
10403 		       u64 instant)
10404 {
10405 	struct sk_buff *skb;
10406 	struct mgmt_ev_device_found *ev;
10407 	bool report_device = hci_discovery_active(hdev);
10408 
10409 	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
10410 		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
10411 				  eir, eir_len, scan_rsp, scan_rsp_len,
10412 				  instant);
10413 
10414 	/* Don't send events for a non-kernel-initiated discovery. For
10415 	 * LE, one exception is if we have pend_le_reports > 0, in which
10416 	 * case we're doing passive scanning and want these events.
10417 	 */
10418 	if (!hci_discovery_active(hdev)) {
10419 		if (link_type == ACL_LINK)
10420 			return;
10421 		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
10422 			report_device = true;
10423 		else if (!hci_is_adv_monitoring(hdev))
10424 			return;
10425 	}
10426 
10427 	if (hdev->discovery.result_filtering) {
10428 		/* We are using service discovery */
10429 		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
10430 				     scan_rsp_len))
10431 			return;
10432 	}
10433 
10434 	if (hdev->discovery.limited) {
10435 		/* Check for limited discoverable bit */
10436 		if (dev_class) {
10437 			if (!(dev_class[1] & 0x20))
10438 				return;
10439 		} else {
10440 			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
10441 			if (!flags || !(flags[0] & LE_AD_LIMITED))
10442 				return;
10443 		}
10444 	}
10445 
10446 	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
10447 	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10448 			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
10449 	if (!skb)
10450 		return;
10451 
10452 	ev = skb_put(skb, sizeof(*ev));
10453 
10454 	/* In case of device discovery with BR/EDR devices (pre 1.2), the
10455 	 * RSSI value was reported as 0 when not available. This behavior
10456 	 * is kept when using device discovery. This is required for full
10457 	 * backwards compatibility with the API.
10458 	 *
10459 	 * However when using service discovery, the value 127 will be
10460 	 * returned when the RSSI is not available.
10461 	 */
10462 	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
10463 	    link_type == ACL_LINK)
10464 		rssi = 0;
10465 
10466 	bacpy(&ev->addr.bdaddr, bdaddr);
10467 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
10468 	ev->rssi = rssi;
10469 	ev->flags = cpu_to_le32(flags);
10470 
10471 	if (eir_len > 0)
10472 		/* Copy EIR or advertising data into event */
10473 		skb_put_data(skb, eir, eir_len);
10474 
10475 	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
10476 		u8 eir_cod[5];
10477 
10478 		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
10479 					   dev_class, 3);
10480 		skb_put_data(skb, eir_cod, sizeof(eir_cod));
10481 	}
10482 
10483 	if (scan_rsp_len > 0)
10484 		/* Append scan response data to event */
10485 		skb_put_data(skb, scan_rsp, scan_rsp_len);
10486 
10487 	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
10488 
10489 	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
10490 }
10491 
10492 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10493 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
10494 {
10495 	struct sk_buff *skb;
10496 	struct mgmt_ev_device_found *ev;
10497 	u16 eir_len = 0;
10498 	u32 flags = 0;
10499 
10500 	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10501 			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
10502 	if (!skb)
10503 		return;
10504 
10505 	ev = skb_put(skb, sizeof(*ev));
10506 	bacpy(&ev->addr.bdaddr, bdaddr);
10507 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
10508 	ev->rssi = rssi;
10509 
10510 	if (name)
10511 		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
10512 	else
10513 		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
10514 
10515 	ev->eir_len = cpu_to_le16(eir_len);
10516 	ev->flags = cpu_to_le32(flags);
10517 
10518 	mgmt_event_skb(skb, NULL);
10519 }
10520 
10521 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
10522 {
10523 	struct mgmt_ev_discovering ev;
10524 
10525 	bt_dev_dbg(hdev, "discovering %u", discovering);
10526 
10527 	memset(&ev, 0, sizeof(ev));
10528 	ev.type = hdev->discovery.type;
10529 	ev.discovering = discovering;
10530 
10531 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
10532 }
10533 
10534 void mgmt_suspending(struct hci_dev *hdev, u8 state)
10535 {
10536 	struct mgmt_ev_controller_suspend ev;
10537 
10538 	ev.suspend_state = state;
10539 	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
10540 }
10541 
10542 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
10543 		   u8 addr_type)
10544 {
10545 	struct mgmt_ev_controller_resume ev;
10546 
10547 	ev.wake_reason = reason;
10548 	if (bdaddr) {
10549 		bacpy(&ev.addr.bdaddr, bdaddr);
10550 		ev.addr.type = addr_type;
10551 	} else {
10552 		memset(&ev.addr, 0, sizeof(ev.addr));
10553 	}
10554 
10555 	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
10556 }
10557 
10558 static struct hci_mgmt_chan chan = {
10559 	.channel	= HCI_CHANNEL_CONTROL,
10560 	.handler_count	= ARRAY_SIZE(mgmt_handlers),
10561 	.handlers	= mgmt_handlers,
10562 	.hdev_init	= mgmt_init_hdev,
10563 };
10564 
10565 int mgmt_init(void)
10566 {
10567 	return hci_mgmt_chan_register(&chan);
10568 }
10569 
10570 void mgmt_exit(void)
10571 {
10572 	hci_mgmt_chan_unregister(&chan);
10573 }
10574 
10575 void mgmt_cleanup(struct sock *sk)
10576 {
10577 	struct mgmt_mesh_tx *mesh_tx;
10578 	struct hci_dev *hdev;
10579 
10580 	read_lock(&hci_dev_list_lock);
10581 
10582 	list_for_each_entry(hdev, &hci_dev_list, list) {
10583 		do {
10584 			mesh_tx = mgmt_mesh_next(hdev, sk);
10585 
10586 			if (mesh_tx)
10587 				mesh_send_complete(hdev, mesh_tx, true);
10588 		} while (mesh_tx);
10589 	}
10590 
10591 	read_unlock(&hci_dev_list_lock);
10592 }
10593