xref: /linux/net/bluetooth/mgmt.c (revision 1f8d99de1d1b4b3764203ae02db57041475dab84)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 #include "eir.h"
42 #include "aosp.h"
43 
44 #define MGMT_VERSION	1
45 #define MGMT_REVISION	22
46 
/* Command opcodes available to trusted (privileged) management sockets.
 * Reported verbatim by MGMT_OP_READ_COMMANDS (see read_commands() below);
 * the opcode values themselves come from <net/bluetooth/mgmt.h>.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
};
133 
/* Event opcodes that may be delivered to trusted management sockets;
 * reported by MGMT_OP_READ_COMMANDS alongside mgmt_commands[].
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};
180 
/* Subset of commands available to untrusted (unprivileged) sockets.
 * All of these are read-only information queries; anything that changes
 * controller state is restricted to trusted sockets (mgmt_commands[]).
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
193 
/* Subset of events that untrusted sockets are allowed to receive */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
208 
/* 2 second delay, in jiffies — presumably used to defer service cache
 * flushes (see service_cache_off() below); TODO confirm against users
 * outside this chunk.
 */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* Sixteen 0x00 bytes — a convenience constant for an all-zero 128-bit key */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
213 
214 /* HCI to MGMT error code conversion table */
/* Indexed directly by the HCI status code; mgmt_status() returns
 * MGMT_STATUS_FAILED for any code beyond the end of this table.
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
281 
282 static u8 mgmt_errno_status(int err)
283 {
284 	switch (err) {
285 	case 0:
286 		return MGMT_STATUS_SUCCESS;
287 	case -EPERM:
288 		return MGMT_STATUS_REJECTED;
289 	case -EINVAL:
290 		return MGMT_STATUS_INVALID_PARAMS;
291 	case -EOPNOTSUPP:
292 		return MGMT_STATUS_NOT_SUPPORTED;
293 	case -EBUSY:
294 		return MGMT_STATUS_BUSY;
295 	case -ETIMEDOUT:
296 		return MGMT_STATUS_AUTH_FAILED;
297 	case -ENOMEM:
298 		return MGMT_STATUS_NO_RESOURCES;
299 	case -EISCONN:
300 		return MGMT_STATUS_ALREADY_CONNECTED;
301 	case -ENOTCONN:
302 		return MGMT_STATUS_DISCONNECTED;
303 	}
304 
305 	return MGMT_STATUS_FAILED;
306 }
307 
308 static u8 mgmt_status(int err)
309 {
310 	if (err < 0)
311 		return mgmt_errno_status(err);
312 
313 	if (err < ARRAY_SIZE(mgmt_status_table))
314 		return mgmt_status_table[err];
315 
316 	return MGMT_STATUS_FAILED;
317 }
318 
/* Send a mgmt event on the control channel to sockets matching @flag,
 * without skipping any socket.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
325 
/* Send a mgmt event on the control channel limited to sockets matching
 * @flag, optionally skipping @skip_sk (e.g. the command originator).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
332 
/* Send a mgmt event on the control channel to trusted sockets only,
 * optionally skipping @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
339 
/* Send a pre-built event skb on the control channel to trusted sockets,
 * optionally skipping @skip_sk.
 */
static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}
345 
346 static u8 le_addr_type(u8 mgmt_addr_type)
347 {
348 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
349 		return ADDR_LE_DEV_PUBLIC;
350 	else
351 		return ADDR_LE_DEV_RANDOM;
352 }
353 
/* Fill a mgmt_rp_read_version reply with the interface version/revision
 * implemented by this file. Non-static so other parts of the stack can
 * reuse it.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
361 
/* MGMT_OP_READ_VERSION handler: reply with the mgmt interface version.
 * Works without a controller index (MGMT_INDEX_NONE).
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
374 
375 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
376 			 u16 data_len)
377 {
378 	struct mgmt_rp_read_commands *rp;
379 	u16 num_commands, num_events;
380 	size_t rp_size;
381 	int i, err;
382 
383 	bt_dev_dbg(hdev, "sock %p", sk);
384 
385 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
386 		num_commands = ARRAY_SIZE(mgmt_commands);
387 		num_events = ARRAY_SIZE(mgmt_events);
388 	} else {
389 		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
390 		num_events = ARRAY_SIZE(mgmt_untrusted_events);
391 	}
392 
393 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
394 
395 	rp = kmalloc(rp_size, GFP_KERNEL);
396 	if (!rp)
397 		return -ENOMEM;
398 
399 	rp->num_commands = cpu_to_le16(num_commands);
400 	rp->num_events = cpu_to_le16(num_events);
401 
402 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
403 		__le16 *opcode = rp->opcodes;
404 
405 		for (i = 0; i < num_commands; i++, opcode++)
406 			put_unaligned_le16(mgmt_commands[i], opcode);
407 
408 		for (i = 0; i < num_events; i++, opcode++)
409 			put_unaligned_le16(mgmt_events[i], opcode);
410 	} else {
411 		__le16 *opcode = rp->opcodes;
412 
413 		for (i = 0; i < num_commands; i++, opcode++)
414 			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
415 
416 		for (i = 0; i < num_events; i++, opcode++)
417 			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
418 	}
419 
420 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
421 				rp, rp_size);
422 	kfree(rp);
423 
424 	return err;
425 }
426 
/* MGMT_OP_READ_INDEX_LIST handler: report the index of every configured
 * HCI_PRIMARY controller. Controllers still in setup/config, bound to a
 * user channel, raw-only, or unconfigured are excluded.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count candidates to size the reply buffer */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	/* Two bytes (one __le16 index) per controller */
	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: we are still holding hci_dev_list_lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in indexes, applying additional visibility
	 * filters, so the final count may be smaller than the first pass.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the payload length: count may have shrunk */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
486 
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: like read_index_list() but
 * reports only HCI_PRIMARY controllers that are still UNCONFIGURED.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count unconfigured primary controllers */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: we are still holding hci_dev_list_lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in indexes with extra visibility filters
	 * applied; the final count may be smaller than the first pass.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
546 
/* MGMT_OP_READ_EXT_INDEX_LIST handler: report every primary and AMP
 * controller with a per-entry type (0x00 configured primary, 0x01
 * unconfigured primary, 0x02 AMP) and bus. Calling this switches the
 * socket over to extended index events permanently.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count primary and AMP controllers for sizing */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	/* GFP_ATOMIC: we are still holding hci_dev_list_lock */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in entries with extra visibility filters;
	 * the final count may be smaller than the sizing pass.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
620 
621 static bool is_configured(struct hci_dev *hdev)
622 {
623 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
624 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
625 		return false;
626 
627 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
628 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
629 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
630 		return false;
631 
632 	return true;
633 }
634 
/* Compute the MGMT_OPTION_* bits that are still missing before the
 * controller is fully configured (same conditions as is_configured()).
 * Returned little-endian, ready to be placed in a reply or event.
 */
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
650 
/* Broadcast a New Configuration Options event with the currently missing
 * options to sockets that enabled option events, skipping @skip.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
658 
/* Complete @opcode with the currently missing options as the reply body */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
666 
/* MGMT_OP_READ_CONFIG_INFO handler: report the manufacturer, which
 * configuration options this controller supports, and which are still
 * missing.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Changing the public address is only supported when the driver
	 * provides a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
694 
/* Build the MGMT_PHY_* bitmask of every PHY the controller supports,
 * derived from its BR/EDR LMP feature bits and LE feature bits.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* 1M 1-slot is mandatory for any BR/EDR controller */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		/* EDR 3M rates require EDR 2M capability first */
		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M is mandatory for any LE controller */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
746 
/* Build the MGMT_PHY_* bitmask of currently selected PHYs. For BR/EDR
 * this is derived from hdev->pkt_type; note the EDR HCI_2DHx/HCI_3DHx
 * bits are "shall not be used" flags, so a selected EDR rate is one whose
 * bit is NOT set. For LE it comes from the default TX/RX PHY preferences.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate 1-slot is always selected on BR/EDR */
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
809 
810 static u32 get_configurable_phys(struct hci_dev *hdev)
811 {
812 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
813 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
814 }
815 
/* Build the MGMT_SETTING_* bitmask of settings this controller can
 * support, based on its BR/EDR and LE capabilities and quirks.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Always available regardless of transport */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable (page scan tuning) needs >= 1.2 */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	/* Configuration is only offered when something is configurable:
	 * external config or a driver-provided set_bdaddr callback.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
862 
/* Build the MGMT_SETTING_* bitmask of the controller's current state,
 * mapped one flag at a time from the hdev dev_flags.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	return settings;
}
933 
/* Find a pending mgmt command for @opcode on @hdev (control channel) */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
938 
939 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
940 {
941 	struct mgmt_pending_cmd *cmd;
942 
943 	/* If there's a pending mgmt command the flags will not yet have
944 	 * their final values, so check for this first.
945 	 */
946 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
947 	if (cmd) {
948 		struct mgmt_mode *cp = cmd->param;
949 		if (cp->val == 0x01)
950 			return LE_AD_GENERAL;
951 		else if (cp->val == 0x02)
952 			return LE_AD_LIMITED;
953 	} else {
954 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
955 			return LE_AD_LIMITED;
956 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
957 			return LE_AD_GENERAL;
958 	}
959 
960 	return 0;
961 }
962 
/* Return whether the controller is (or is about to become) connectable.
 * A pending Set Connectable command wins over the current device flag.
 */
bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * it's final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}
979 
/* hci_cmd_sync work: flush the cached EIR data and class of device to
 * the controller. Always reports success.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
987 
/* Delayed work handler: once the service cache period ends, queue the
 * EIR/class update. The test-and-clear makes this a no-op if the cache
 * flag was already cleared elsewhere.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}
998 
999 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1000 {
1001 	/* The generation of a new RPA and programming it into the
1002 	 * controller happens in the hci_req_enable_advertising()
1003 	 * function.
1004 	 */
1005 	if (ext_adv_capable(hdev))
1006 		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1007 	else
1008 		return hci_enable_advertising_sync(hdev);
1009 }
1010 
/* Delayed work handler: mark the resolvable private address as expired
 * and, if advertising is active, queue a refresh (which regenerates the
 * RPA as a side effect — see rpa_expired_sync()).
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
1025 
/* One-time mgmt initialization of a controller, done the first time a
 * mgmt command addresses it; test-and-set of HCI_MGMT makes any later
 * call a no-op.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
1041 
1042 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1043 				void *data, u16 data_len)
1044 {
1045 	struct mgmt_rp_read_info rp;
1046 
1047 	bt_dev_dbg(hdev, "sock %p", sk);
1048 
1049 	hci_dev_lock(hdev);
1050 
1051 	memset(&rp, 0, sizeof(rp));
1052 
1053 	bacpy(&rp.bdaddr, &hdev->bdaddr);
1054 
1055 	rp.version = hdev->hci_ver;
1056 	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1057 
1058 	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1059 	rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1060 
1061 	memcpy(rp.dev_class, hdev->dev_class, 3);
1062 
1063 	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1064 	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1065 
1066 	hci_dev_unlock(hdev);
1067 
1068 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1069 				 sizeof(rp));
1070 }
1071 
/* Append class of device, appearance and both device names as EIR
 * fields to @eir, returning the total number of bytes written. The
 * caller must supply a buffer large enough for all four fields plus
 * their EIR headers.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	/* Class of device is only meaningful when BR/EDR is enabled */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	/* Appearance is an LE property */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strlen(hdev->dev_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strlen(hdev->short_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1095 
/* MGMT_OP_READ_EXT_INFO handler: like Read Info but with class, names
 * and appearance delivered as a variable-length EIR blob appended to
 * the fixed reply header.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	/* Reply is built in a stack buffer; rp points at its start and
	 * the EIR data follows the fixed-size header.
	 */
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	/* Only the header plus the bytes actually used for EIR are sent */
	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1135 
/* Emit the Extended Controller Information Changed event to all mgmt
 * sockets that opted in via Read Ext Info, except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	/* Event is built in a stack buffer: fixed header followed by
	 * the variable-length EIR blob.
	 */
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1151 
/* Send a successful command-complete for @opcode carrying the current
 * settings bitmask (little-endian) as the response payload.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1159 
1160 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1161 {
1162 	struct mgmt_ev_advertising_added ev;
1163 
1164 	ev.instance = instance;
1165 
1166 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1167 }
1168 
1169 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1170 			      u8 instance)
1171 {
1172 	struct mgmt_ev_advertising_removed ev;
1173 
1174 	ev.instance = instance;
1175 
1176 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1177 }
1178 
1179 static void cancel_adv_timeout(struct hci_dev *hdev)
1180 {
1181 	if (hdev->adv_instance_timeout) {
1182 		hdev->adv_instance_timeout = 0;
1183 		cancel_delayed_work(&hdev->adv_instance_expire);
1184 	}
1185 }
1186 
/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	/* Re-queue every connection parameter entry on the pending list
	 * that matches its auto-connect policy; used when powering on.
	 */
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		list_del_init(&p->action);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_add(&p->action, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			list_add(&p->action, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}
1211 
/* Broadcast the New Settings event with the current settings bitmask
 * to all sockets with setting events enabled, except @skip.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1219 
/* Completion callback for set_powered_sync: send the mgmt reply and,
 * on successful power-on, restart LE actions and emit New Settings.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			/* restart_le_actions() requires hdev->lock */
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_free(cmd);
}
1249 
1250 static int set_powered_sync(struct hci_dev *hdev, void *data)
1251 {
1252 	struct mgmt_pending_cmd *cmd = data;
1253 	struct mgmt_mode *cp = cmd->param;
1254 
1255 	BT_DBG("%s", hdev->name);
1256 
1257 	return hci_set_powered_sync(hdev, cp->val);
1258 }
1259 
1260 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1261 		       u16 len)
1262 {
1263 	struct mgmt_mode *cp = data;
1264 	struct mgmt_pending_cmd *cmd;
1265 	int err;
1266 
1267 	bt_dev_dbg(hdev, "sock %p", sk);
1268 
1269 	if (cp->val != 0x00 && cp->val != 0x01)
1270 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1271 				       MGMT_STATUS_INVALID_PARAMS);
1272 
1273 	hci_dev_lock(hdev);
1274 
1275 	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1276 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1277 				      MGMT_STATUS_BUSY);
1278 		goto failed;
1279 	}
1280 
1281 	if (!!cp->val == hdev_is_powered(hdev)) {
1282 		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1283 		goto failed;
1284 	}
1285 
1286 	cmd = mgmt_pending_new(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1287 	if (!cmd) {
1288 		err = -ENOMEM;
1289 		goto failed;
1290 	}
1291 
1292 	err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
1293 				 mgmt_set_powered_complete);
1294 
1295 failed:
1296 	hci_dev_unlock(hdev);
1297 	return err;
1298 }
1299 
/* Public wrapper: broadcast New Settings to every listener. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1304 
/* Context shared by the mgmt_pending_foreach() callbacks below. */
struct cmd_lookup {
	struct sock *sk;	/* first responded socket (referenced) */
	struct hci_dev *hdev;
	u8 mgmt_status;		/* status code for handlers that need one */
};
1310 
/* mgmt_pending_foreach() callback: reply to @cmd with the current
 * settings and free it. The first command's socket is kept (with a
 * reference) in the lookup so the caller can skip it when emitting a
 * follow-up New Settings event.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		/* caller is responsible for the matching sock_put() */
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1326 
/* mgmt_pending_foreach() callback: answer @cmd with the status code
 * pointed to by @data and remove the pending command.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1334 
1335 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1336 {
1337 	if (cmd->cmd_complete) {
1338 		u8 *status = data;
1339 
1340 		cmd->cmd_complete(cmd, *status);
1341 		mgmt_pending_remove(cmd);
1342 
1343 		return;
1344 	}
1345 
1346 	cmd_status_rsp(cmd, data);
1347 }
1348 
/* cmd_complete handler that echoes the command's own parameters back
 * as the response payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1354 
/* cmd_complete handler for commands whose parameters start with a
 * struct mgmt_addr_info; only that prefix is echoed back.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1360 
1361 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1362 {
1363 	if (!lmp_bredr_capable(hdev))
1364 		return MGMT_STATUS_NOT_SUPPORTED;
1365 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1366 		return MGMT_STATUS_REJECTED;
1367 	else
1368 		return MGMT_STATUS_SUCCESS;
1369 }
1370 
1371 static u8 mgmt_le_support(struct hci_dev *hdev)
1372 {
1373 	if (!lmp_le_capable(hdev))
1374 		return MGMT_STATUS_NOT_SUPPORTED;
1375 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1376 		return MGMT_STATUS_REJECTED;
1377 	else
1378 		return MGMT_STATUS_SUCCESS;
1379 }
1380 
/* Completion callback for set_discoverable_sync: report the result to
 * the requester, arm the discoverable timeout if one was configured,
 * and broadcast New Settings.
 */
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* roll back the limited flag set_discoverable() set */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	/* Arming of the timeout was deferred from set_discoverable() */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
	hci_dev_unlock(hdev);
}
1410 
/* hci_cmd_sync worker for MGMT_OP_SET_DISCOVERABLE: push the flags
 * that set_discoverable() already updated out to the controller.
 */
static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	/* bt_dev_dbg already prefixes the device name; this replaces the
	 * BT_DBG("%s", hdev->name) form used nowhere else in this file.
	 */
	bt_dev_dbg(hdev, "");

	return hci_update_discoverable_sync(hdev);
}
1417 
1418 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1419 			    u16 len)
1420 {
1421 	struct mgmt_cp_set_discoverable *cp = data;
1422 	struct mgmt_pending_cmd *cmd;
1423 	u16 timeout;
1424 	int err;
1425 
1426 	bt_dev_dbg(hdev, "sock %p", sk);
1427 
1428 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1429 	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1430 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1431 				       MGMT_STATUS_REJECTED);
1432 
1433 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1434 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1435 				       MGMT_STATUS_INVALID_PARAMS);
1436 
1437 	timeout = __le16_to_cpu(cp->timeout);
1438 
1439 	/* Disabling discoverable requires that no timeout is set,
1440 	 * and enabling limited discoverable requires a timeout.
1441 	 */
1442 	if ((cp->val == 0x00 && timeout > 0) ||
1443 	    (cp->val == 0x02 && timeout == 0))
1444 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1445 				       MGMT_STATUS_INVALID_PARAMS);
1446 
1447 	hci_dev_lock(hdev);
1448 
1449 	if (!hdev_is_powered(hdev) && timeout > 0) {
1450 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1451 				      MGMT_STATUS_NOT_POWERED);
1452 		goto failed;
1453 	}
1454 
1455 	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1456 	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1457 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1458 				      MGMT_STATUS_BUSY);
1459 		goto failed;
1460 	}
1461 
1462 	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1463 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1464 				      MGMT_STATUS_REJECTED);
1465 		goto failed;
1466 	}
1467 
1468 	if (hdev->advertising_paused) {
1469 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1470 				      MGMT_STATUS_BUSY);
1471 		goto failed;
1472 	}
1473 
1474 	if (!hdev_is_powered(hdev)) {
1475 		bool changed = false;
1476 
1477 		/* Setting limited discoverable when powered off is
1478 		 * not a valid operation since it requires a timeout
1479 		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1480 		 */
1481 		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1482 			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1483 			changed = true;
1484 		}
1485 
1486 		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1487 		if (err < 0)
1488 			goto failed;
1489 
1490 		if (changed)
1491 			err = new_settings(hdev, sk);
1492 
1493 		goto failed;
1494 	}
1495 
1496 	/* If the current mode is the same, then just update the timeout
1497 	 * value with the new value. And if only the timeout gets updated,
1498 	 * then no need for any HCI transactions.
1499 	 */
1500 	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1501 	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
1502 						   HCI_LIMITED_DISCOVERABLE)) {
1503 		cancel_delayed_work(&hdev->discov_off);
1504 		hdev->discov_timeout = timeout;
1505 
1506 		if (cp->val && hdev->discov_timeout > 0) {
1507 			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1508 			queue_delayed_work(hdev->req_workqueue,
1509 					   &hdev->discov_off, to);
1510 		}
1511 
1512 		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1513 		goto failed;
1514 	}
1515 
1516 	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1517 	if (!cmd) {
1518 		err = -ENOMEM;
1519 		goto failed;
1520 	}
1521 
1522 	/* Cancel any potential discoverable timeout that might be
1523 	 * still active and store new timeout value. The arming of
1524 	 * the timeout happens in the complete handler.
1525 	 */
1526 	cancel_delayed_work(&hdev->discov_off);
1527 	hdev->discov_timeout = timeout;
1528 
1529 	if (cp->val)
1530 		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1531 	else
1532 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1533 
1534 	/* Limited discoverable mode */
1535 	if (cp->val == 0x02)
1536 		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1537 	else
1538 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1539 
1540 	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1541 				 mgmt_set_discoverable_complete);
1542 
1543 failed:
1544 	hci_dev_unlock(hdev);
1545 	return err;
1546 }
1547 
/* Completion callback for set_connectable_sync: report the result to
 * the requester and broadcast New Settings on success.
 */
static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
					  int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
	hci_dev_unlock(hdev);
}
1570 
/* Handle Set Connectable while the controller is powered off: only the
 * flags are updated, no HCI traffic. Disabling connectable also clears
 * discoverable since the latter depends on the former.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		/* refresh scan state so it reflects the new flags */
		hci_req_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1599 
/* hci_cmd_sync worker for MGMT_OP_SET_CONNECTABLE: push the flags that
 * set_connectable() already updated out to the controller.
 */
static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	/* bt_dev_dbg already prefixes the device name; this replaces the
	 * BT_DBG("%s", hdev->name) form used nowhere else in this file.
	 */
	bt_dev_dbg(hdev, "");

	return hci_update_connectable_sync(hdev);
}
1606 
1607 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1608 			   u16 len)
1609 {
1610 	struct mgmt_mode *cp = data;
1611 	struct mgmt_pending_cmd *cmd;
1612 	int err;
1613 
1614 	bt_dev_dbg(hdev, "sock %p", sk);
1615 
1616 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1617 	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1618 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1619 				       MGMT_STATUS_REJECTED);
1620 
1621 	if (cp->val != 0x00 && cp->val != 0x01)
1622 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1623 				       MGMT_STATUS_INVALID_PARAMS);
1624 
1625 	hci_dev_lock(hdev);
1626 
1627 	if (!hdev_is_powered(hdev)) {
1628 		err = set_connectable_update_settings(hdev, sk, cp->val);
1629 		goto failed;
1630 	}
1631 
1632 	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1633 	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1634 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1635 				      MGMT_STATUS_BUSY);
1636 		goto failed;
1637 	}
1638 
1639 	cmd = mgmt_pending_new(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1640 	if (!cmd) {
1641 		err = -ENOMEM;
1642 		goto failed;
1643 	}
1644 
1645 	if (cp->val) {
1646 		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1647 	} else {
1648 		if (hdev->discov_timeout > 0)
1649 			cancel_delayed_work(&hdev->discov_off);
1650 
1651 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1652 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1653 		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1654 	}
1655 
1656 	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1657 				 mgmt_set_connectable_complete);
1658 
1659 failed:
1660 	hci_dev_unlock(hdev);
1661 	return err;
1662 }
1663 
/* MGMT_OP_SET_BONDABLE handler: pure flag change, no HCI transaction
 * is required so the reply is sent synchronously.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* test-and-set/clear tells us whether the flag actually flipped */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1701 
/* MGMT_OP_SET_LINK_SECURITY handler: toggle BR/EDR link level security
 * (authentication). When powered, this sends HCI Write Auth Enable and
 * the reply is completed from the command's event handler; when
 * powered off only the flag is updated.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* While powered off only the flag needs updating */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Requested state already matches: reply without HCI traffic */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1770 
/* Completion callback for set_ssp_sync: settle the SSP (and dependent
 * High Speed) flags, answer all pending Set SSP commands, and refresh
 * the EIR data.
 */
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* Roll back the flag set optimistically in set_ssp_sync;
		 * HS depends on SSP so it is cleared along with it.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);

		/* Disabling SSP also disables High Speed */
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}
1815 
/* hci_cmd_sync worker for MGMT_OP_SET_SSP: optimistically set the flag
 * before issuing Write Simple Pairing Mode and roll it back if the
 * command fails.
 */
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	/* undo the optimistic flag change on failure */
	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
1833 
/* MGMT_OP_SET_SSP handler: enable/disable Secure Simple Pairing. When
 * powered, the mode change is queued as sync work and the reply comes
 * from set_ssp_complete; when powered off only the flags are updated.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* While powered off only the flags need updating; disabling SSP
	 * also disables the dependent High Speed setting.
	 */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Requested state already matches: reply without HCI traffic */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1913 
/* MGMT_OP_SET_HS handler: toggle the High Speed (AMP) setting. This is
 * a pure flag change (no HCI traffic) but it requires CONFIG_BT_HS,
 * BR/EDR, SSP capability and SSP being enabled.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* High Speed depends on SSP being enabled */
	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A pending Set SSP could change the SSP state underneath us */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		/* Disabling is only allowed while powered off */
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1974 
/* Completion callback for set_le_sync: answer all pending Set LE
 * commands and broadcast New Settings on success.
 */
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
							&status);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the first responded socket */
	if (match.sk)
		sock_put(match.sk);
}
1995 
/* hci_cmd_sync worker for MGMT_OP_SET_LE: enable or disable the LE
 * host support on the controller, tearing down advertising first when
 * disabling and refreshing advertising data when enabling.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		/* Stop any active advertising before turning LE off */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
2037 
/* MGMT_OP_SET_LE handler: enable/disable LE support. When powered and
 * a real change is needed, the work is queued and set_le_complete
 * sends the reply; otherwise only the flags are updated here.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Turning LE off removes all advertising instances */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	/* Powered off or no actual change: only the flags are updated */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2129 
2130 /* This is a helper function to test for pending mgmt commands that can
2131  * cause CoD or EIR HCI commands. We can only allow one such pending
2132  * mgmt command at a time since otherwise we cannot easily track what
2133  * the current values are, will be, and based on that calculate if a new
2134  * HCI command needs to be sent and if yes with what value.
2135  */
2136 static bool pending_eir_or_class(struct hci_dev *hdev)
2137 {
2138 	struct mgmt_pending_cmd *cmd;
2139 
2140 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2141 		switch (cmd->opcode) {
2142 		case MGMT_OP_ADD_UUID:
2143 		case MGMT_OP_REMOVE_UUID:
2144 		case MGMT_OP_SET_DEV_CLASS:
2145 		case MGMT_OP_SET_POWERED:
2146 			return true;
2147 		}
2148 	}
2149 
2150 	return false;
2151 }
2152 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order. UUIDs whose last 12 bytes match this base
 * can be shortened to 16- or 32-bit form (see get_uuid_size()).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2157 
2158 static u8 get_uuid_size(const u8 *uuid)
2159 {
2160 	u32 val;
2161 
2162 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2163 		return 128;
2164 
2165 	val = get_unaligned_le32(&uuid[12]);
2166 	if (val > 0xffff)
2167 		return 32;
2168 
2169 	return 16;
2170 }
2171 
2172 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2173 {
2174 	struct mgmt_pending_cmd *cmd = data;
2175 
2176 	bt_dev_dbg(hdev, "err %d", err);
2177 
2178 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2179 			  mgmt_status(err), hdev->dev_class, 3);
2180 
2181 	mgmt_pending_free(cmd);
2182 }
2183 
/* hci_cmd_sync work for Add UUID: refresh the Class of Device first and
 * only update the EIR data if that succeeded.
 */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2194 
/* MGMT_OP_ADD_UUID handler: register a new service UUID with the
 * adapter and queue an update of the Class of Device and EIR data so
 * that the new service becomes visible.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	/* Cache the shortest encoding (16/32/128 bit) for EIR generation */
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* On success the response is sent later by mgmt_class_complete();
	 * if queueing fails, free the pending entry here.
	 */
	err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2240 
2241 static bool enable_service_cache(struct hci_dev *hdev)
2242 {
2243 	if (!hdev_is_powered(hdev))
2244 		return false;
2245 
2246 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2247 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2248 				   CACHE_TIMEOUT);
2249 		return true;
2250 	}
2251 
2252 	return false;
2253 }
2254 
/* hci_cmd_sync work for Remove UUID: refresh the Class of Device first
 * and only update the EIR data if that succeeded.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2265 
/* MGMT_OP_REMOVE_UUID handler: remove a single registered service UUID,
 * or all of them when the all-zero wildcard UUID is supplied, and queue
 * a refresh of the Class of Device and EIR data.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID acts as a wildcard meaning "remove everything" */
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache got armed the update is deferred to
		 * the delayed work, so respond immediately here.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* Remove every stored entry that matches the requested UUID */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	/* Unknown UUID: nothing was removed */
	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Response is sent later by mgmt_class_complete() */
	err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2331 
2332 static int set_class_sync(struct hci_dev *hdev, void *data)
2333 {
2334 	int err = 0;
2335 
2336 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2337 		cancel_delayed_work_sync(&hdev->service_cache);
2338 		err = hci_update_eir_sync(hdev);
2339 	}
2340 
2341 	if (err)
2342 		return err;
2343 
2344 	return hci_update_class_sync(hdev);
2345 }
2346 
/* MGMT_OP_SET_DEV_CLASS handler: store the requested major/minor device
 * class and, when powered, queue an update of the Class of Device on
 * the controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Class of Device only exists for BR/EDR capable controllers */
	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The low two minor bits and high three major bits are reserved
	 * in the Class of Device encoding and must be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* While powered off only the stored values change; respond with
	 * the current dev_class without queueing any HCI work.
	 */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Response is sent later by mgmt_class_complete() */
	err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2398 
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the entire set of stored
 * BR/EDR link keys with the list supplied by userspace and update the
 * keep-debug-keys policy.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Largest key count whose payload still fits in a u16 length */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Link keys are a BR/EDR concept */
	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must match the declared key count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* debug_keys is a boolean in the wire format */
	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate every entry up front so the key store is only cleared
	 * for a fully valid request. Key types above 0x08 are rejected.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* Full replace: drop everything previously stored */
	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Notify listeners if the keep-debug-keys setting flipped */
	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Skip keys that have been administratively blocked */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2487 
2488 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2489 			   u8 addr_type, struct sock *skip_sk)
2490 {
2491 	struct mgmt_ev_device_unpaired ev;
2492 
2493 	bacpy(&ev.addr.bdaddr, bdaddr);
2494 	ev.addr.type = addr_type;
2495 
2496 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2497 			  skip_sk);
2498 }
2499 
/* MGMT_OP_UNPAIR_DEVICE handler: remove all stored pairing data for a
 * peer (link key for BR/EDR, LTK/IRK via SMP for LE) and optionally
 * terminate an existing connection to it.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	/* The response always echoes the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	/* disconnect is a boolean in the wire format */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		/* No stored link key means the device was not paired */
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* Not connected: drop the stored connection parameters now */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Defer the response; it is sent later via cmd->cmd_complete
	 * (addr_cmd_complete) once the disconnection is processed.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2627 
2628 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2629 		      u16 len)
2630 {
2631 	struct mgmt_cp_disconnect *cp = data;
2632 	struct mgmt_rp_disconnect rp;
2633 	struct mgmt_pending_cmd *cmd;
2634 	struct hci_conn *conn;
2635 	int err;
2636 
2637 	bt_dev_dbg(hdev, "sock %p", sk);
2638 
2639 	memset(&rp, 0, sizeof(rp));
2640 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2641 	rp.addr.type = cp->addr.type;
2642 
2643 	if (!bdaddr_type_is_valid(cp->addr.type))
2644 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2645 					 MGMT_STATUS_INVALID_PARAMS,
2646 					 &rp, sizeof(rp));
2647 
2648 	hci_dev_lock(hdev);
2649 
2650 	if (!test_bit(HCI_UP, &hdev->flags)) {
2651 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2652 					MGMT_STATUS_NOT_POWERED, &rp,
2653 					sizeof(rp));
2654 		goto failed;
2655 	}
2656 
2657 	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2658 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2659 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
2660 		goto failed;
2661 	}
2662 
2663 	if (cp->addr.type == BDADDR_BREDR)
2664 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2665 					       &cp->addr.bdaddr);
2666 	else
2667 		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2668 					       le_addr_type(cp->addr.type));
2669 
2670 	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2671 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2672 					MGMT_STATUS_NOT_CONNECTED, &rp,
2673 					sizeof(rp));
2674 		goto failed;
2675 	}
2676 
2677 	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2678 	if (!cmd) {
2679 		err = -ENOMEM;
2680 		goto failed;
2681 	}
2682 
2683 	cmd->cmd_complete = generic_cmd_complete;
2684 
2685 	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2686 	if (err < 0)
2687 		mgmt_pending_remove(cmd);
2688 
2689 failed:
2690 	hci_dev_unlock(hdev);
2691 	return err;
2692 }
2693 
2694 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2695 {
2696 	switch (link_type) {
2697 	case LE_LINK:
2698 		switch (addr_type) {
2699 		case ADDR_LE_DEV_PUBLIC:
2700 			return BDADDR_LE_PUBLIC;
2701 
2702 		default:
2703 			/* Fallback to LE Random address type */
2704 			return BDADDR_LE_RANDOM;
2705 		}
2706 
2707 	default:
2708 		/* Fallback to BR/EDR type */
2709 		return BDADDR_BREDR;
2710 	}
2711 }
2712 
2713 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2714 			   u16 data_len)
2715 {
2716 	struct mgmt_rp_get_connections *rp;
2717 	struct hci_conn *c;
2718 	int err;
2719 	u16 i;
2720 
2721 	bt_dev_dbg(hdev, "sock %p", sk);
2722 
2723 	hci_dev_lock(hdev);
2724 
2725 	if (!hdev_is_powered(hdev)) {
2726 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2727 				      MGMT_STATUS_NOT_POWERED);
2728 		goto unlock;
2729 	}
2730 
2731 	i = 0;
2732 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2733 		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2734 			i++;
2735 	}
2736 
2737 	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
2738 	if (!rp) {
2739 		err = -ENOMEM;
2740 		goto unlock;
2741 	}
2742 
2743 	i = 0;
2744 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2745 		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2746 			continue;
2747 		bacpy(&rp->addr[i].bdaddr, &c->dst);
2748 		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2749 		if (c->type == SCO_LINK || c->type == ESCO_LINK)
2750 			continue;
2751 		i++;
2752 	}
2753 
2754 	rp->conn_count = cpu_to_le16(i);
2755 
2756 	/* Recalculate length in case of filtered SCO connections, etc */
2757 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2758 				struct_size(rp, addr, i));
2759 
2760 	kfree(rp);
2761 
2762 unlock:
2763 	hci_dev_unlock(hdev);
2764 	return err;
2765 }
2766 
2767 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2768 				   struct mgmt_cp_pin_code_neg_reply *cp)
2769 {
2770 	struct mgmt_pending_cmd *cmd;
2771 	int err;
2772 
2773 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2774 			       sizeof(*cp));
2775 	if (!cmd)
2776 		return -ENOMEM;
2777 
2778 	cmd->cmd_complete = addr_cmd_complete;
2779 
2780 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2781 			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2782 	if (err < 0)
2783 		mgmt_pending_remove(cmd);
2784 
2785 	return err;
2786 }
2787 
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN code for
 * a pending BR/EDR PIN code request to the controller.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* A PIN reply only makes sense for an existing ACL connection */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN: send a negative
	 * reply to the remote side and report invalid parameters to the
	 * caller.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Build the HCI PIN Code Reply from the mgmt parameters */
	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2849 
2850 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2851 			     u16 len)
2852 {
2853 	struct mgmt_cp_set_io_capability *cp = data;
2854 
2855 	bt_dev_dbg(hdev, "sock %p", sk);
2856 
2857 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2858 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2859 				       MGMT_STATUS_INVALID_PARAMS);
2860 
2861 	hci_dev_lock(hdev);
2862 
2863 	hdev->io_capability = cp->io_capability;
2864 
2865 	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2866 
2867 	hci_dev_unlock(hdev);
2868 
2869 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
2870 				 NULL, 0);
2871 }
2872 
2873 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2874 {
2875 	struct hci_dev *hdev = conn->hdev;
2876 	struct mgmt_pending_cmd *cmd;
2877 
2878 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2879 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2880 			continue;
2881 
2882 		if (cmd->user_data != conn)
2883 			continue;
2884 
2885 		return cmd;
2886 	}
2887 
2888 	return NULL;
2889 }
2890 
/* Complete a pending Pair Device command with the given mgmt status,
 * detach the pairing callbacks from the connection and release the
 * references the command was holding.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Release the reference taken with hci_conn_get() when the
	 * command was created (see pair_device()).
	 */
	hci_conn_put(conn);

	return err;
}
2919 
2920 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2921 {
2922 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2923 	struct mgmt_pending_cmd *cmd;
2924 
2925 	cmd = find_pairing(conn);
2926 	if (cmd) {
2927 		cmd->cmd_complete(cmd, status);
2928 		mgmt_pending_remove(cmd);
2929 	}
2930 }
2931 
2932 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2933 {
2934 	struct mgmt_pending_cmd *cmd;
2935 
2936 	BT_DBG("status %u", status);
2937 
2938 	cmd = find_pairing(conn);
2939 	if (!cmd) {
2940 		BT_DBG("Unable to find a pending command");
2941 		return;
2942 	}
2943 
2944 	cmd->cmd_complete(cmd, mgmt_status(status));
2945 	mgmt_pending_remove(cmd);
2946 }
2947 
2948 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2949 {
2950 	struct mgmt_pending_cmd *cmd;
2951 
2952 	BT_DBG("status %u", status);
2953 
2954 	if (!status)
2955 		return;
2956 
2957 	cmd = find_pairing(conn);
2958 	if (!cmd) {
2959 		BT_DBG("Unable to find a pending command");
2960 		return;
2961 	}
2962 
2963 	cmd->cmd_complete(cmd, mgmt_status(status));
2964 	mgmt_pending_remove(cmd);
2965 }
2966 
2967 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2968 		       u16 len)
2969 {
2970 	struct mgmt_cp_pair_device *cp = data;
2971 	struct mgmt_rp_pair_device rp;
2972 	struct mgmt_pending_cmd *cmd;
2973 	u8 sec_level, auth_type;
2974 	struct hci_conn *conn;
2975 	int err;
2976 
2977 	bt_dev_dbg(hdev, "sock %p", sk);
2978 
2979 	memset(&rp, 0, sizeof(rp));
2980 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2981 	rp.addr.type = cp->addr.type;
2982 
2983 	if (!bdaddr_type_is_valid(cp->addr.type))
2984 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2985 					 MGMT_STATUS_INVALID_PARAMS,
2986 					 &rp, sizeof(rp));
2987 
2988 	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2989 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2990 					 MGMT_STATUS_INVALID_PARAMS,
2991 					 &rp, sizeof(rp));
2992 
2993 	hci_dev_lock(hdev);
2994 
2995 	if (!hdev_is_powered(hdev)) {
2996 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2997 					MGMT_STATUS_NOT_POWERED, &rp,
2998 					sizeof(rp));
2999 		goto unlock;
3000 	}
3001 
3002 	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3003 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3004 					MGMT_STATUS_ALREADY_PAIRED, &rp,
3005 					sizeof(rp));
3006 		goto unlock;
3007 	}
3008 
3009 	sec_level = BT_SECURITY_MEDIUM;
3010 	auth_type = HCI_AT_DEDICATED_BONDING;
3011 
3012 	if (cp->addr.type == BDADDR_BREDR) {
3013 		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3014 				       auth_type, CONN_REASON_PAIR_DEVICE);
3015 	} else {
3016 		u8 addr_type = le_addr_type(cp->addr.type);
3017 		struct hci_conn_params *p;
3018 
3019 		/* When pairing a new device, it is expected to remember
3020 		 * this device for future connections. Adding the connection
3021 		 * parameter information ahead of time allows tracking
3022 		 * of the peripheral preferred values and will speed up any
3023 		 * further connection establishment.
3024 		 *
3025 		 * If connection parameters already exist, then they
3026 		 * will be kept and this function does nothing.
3027 		 */
3028 		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3029 
3030 		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3031 			p->auto_connect = HCI_AUTO_CONN_DISABLED;
3032 
3033 		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3034 					   sec_level, HCI_LE_CONN_TIMEOUT,
3035 					   CONN_REASON_PAIR_DEVICE);
3036 	}
3037 
3038 	if (IS_ERR(conn)) {
3039 		int status;
3040 
3041 		if (PTR_ERR(conn) == -EBUSY)
3042 			status = MGMT_STATUS_BUSY;
3043 		else if (PTR_ERR(conn) == -EOPNOTSUPP)
3044 			status = MGMT_STATUS_NOT_SUPPORTED;
3045 		else if (PTR_ERR(conn) == -ECONNREFUSED)
3046 			status = MGMT_STATUS_REJECTED;
3047 		else
3048 			status = MGMT_STATUS_CONNECT_FAILED;
3049 
3050 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3051 					status, &rp, sizeof(rp));
3052 		goto unlock;
3053 	}
3054 
3055 	if (conn->connect_cfm_cb) {
3056 		hci_conn_drop(conn);
3057 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3058 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
3059 		goto unlock;
3060 	}
3061 
3062 	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3063 	if (!cmd) {
3064 		err = -ENOMEM;
3065 		hci_conn_drop(conn);
3066 		goto unlock;
3067 	}
3068 
3069 	cmd->cmd_complete = pairing_complete;
3070 
3071 	/* For LE, just connecting isn't a proof that the pairing finished */
3072 	if (cp->addr.type == BDADDR_BREDR) {
3073 		conn->connect_cfm_cb = pairing_complete_cb;
3074 		conn->security_cfm_cb = pairing_complete_cb;
3075 		conn->disconn_cfm_cb = pairing_complete_cb;
3076 	} else {
3077 		conn->connect_cfm_cb = le_pairing_complete_cb;
3078 		conn->security_cfm_cb = le_pairing_complete_cb;
3079 		conn->disconn_cfm_cb = le_pairing_complete_cb;
3080 	}
3081 
3082 	conn->io_capability = cp->io_cap;
3083 	cmd->user_data = hci_conn_get(conn);
3084 
3085 	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3086 	    hci_conn_security(conn, sec_level, auth_type, true)) {
3087 		cmd->cmd_complete(cmd, 0);
3088 		mgmt_pending_remove(cmd);
3089 	}
3090 
3091 	err = 0;
3092 
3093 unlock:
3094 	hci_dev_unlock(hdev);
3095 	return err;
3096 }
3097 
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort the pending Pair Device
 * command for the given address, remove any partial pairing state and
 * tear down a link that only existed for the pairing attempt.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* There must be a pairing in progress to cancel */
	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The cancel must target the peer the pairing was started for */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* NOTE(review): cmd_complete (pairing_complete) drops and puts
	 * the command's connection references, yet conn is still used
	 * below — presumably kept alive by the connection hash while the
	 * link exists; confirm this cannot race with disconnection.
	 */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3154 
/* Common helper for the user pairing response commands (PIN, user
 * confirmation and passkey replies plus their negative variants).
 * Validates that the adapter is powered and the peer is connected, then
 * either hands the response to SMP (for LE) or sends the corresponding
 * HCI command (for BR/EDR).
 *
 * @mgmt_op: mgmt opcode used for the status/complete response
 * @hci_op:  HCI command to send for BR/EDR pairing
 * @passkey: passkey value, only used with HCI_OP_USER_PASSKEY_REPLY
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE responses are handled by SMP; reply to userspace at once */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3225 
3226 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3227 			      void *data, u16 len)
3228 {
3229 	struct mgmt_cp_pin_code_neg_reply *cp = data;
3230 
3231 	bt_dev_dbg(hdev, "sock %p", sk);
3232 
3233 	return user_pairing_resp(sk, hdev, &cp->addr,
3234 				MGMT_OP_PIN_CODE_NEG_REPLY,
3235 				HCI_OP_PIN_CODE_NEG_REPLY, 0);
3236 }
3237 
/* Accept a user-confirmation (numeric comparison) pairing request.
 * Unlike its sibling wrappers this one validates the parameter length
 * explicitly before delegating to user_pairing_resp().
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The command carries no variable-length payload, so any other
	 * size is malformed.
	 */
	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3253 
/* Reject a user-confirmation (numeric comparison) pairing request;
 * delegates to user_pairing_resp() with no passkey.
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3265 
/* Supply the passkey entered by the user for a passkey-entry pairing
 * request.  The only wrapper that forwards a non-zero passkey to
 * user_pairing_resp().
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3277 
/* Reject a passkey-entry pairing request; delegates to
 * user_pairing_resp() with no passkey.
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3289 
3290 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3291 {
3292 	struct adv_info *adv_instance;
3293 
3294 	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3295 	if (!adv_instance)
3296 		return 0;
3297 
3298 	/* stop if current instance doesn't need to be changed */
3299 	if (!(adv_instance->flags & flags))
3300 		return 0;
3301 
3302 	cancel_adv_timeout(hdev);
3303 
3304 	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3305 	if (!adv_instance)
3306 		return 0;
3307 
3308 	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3309 
3310 	return 0;
3311 }
3312 
/* hci_cmd_sync_queue() callback: refresh advertising instances that
 * embed the local name after it changed.
 */
static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}
3317 
/* Completion callback for the queued Set Local Name work.  Reports
 * the outcome on the originating mgmt socket and, on success while LE
 * advertising is active, queues name_changed_sync() so instances that
 * advertise the local name get refreshed.  Always consumes @cmd.
 */
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		/* Echo the requested name back in the reply */
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}
3339 
/* Synchronous worker for Set Local Name: push the new name to the
 * controller for BR/EDR (name + EIR) and refresh LE scan response
 * data when advertising is enabled.  Always returns 0; failures are
 * surfaced via the individual update helpers' own error paths.
 */
static int set_name_sync(struct hci_dev *hdev, void *data)
{
	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}
3355 
/* MGMT_OP_SET_LOCAL_NAME handler.  The short name is committed
 * immediately; the full name is committed right away when powered off
 * but only after the controller accepted it when powered on (via
 * set_name_sync/set_name_complete).  Note: the "failed" label is the
 * common exit for success paths as well - it only drops the lock.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* The short name is host-side only, so it can be stored
	 * unconditionally.
	 */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	/* Powered: defer to the cmd_sync machinery; the reply is sent
	 * from set_name_complete().
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3418 
/* hci_cmd_sync_queue() callback: refresh advertising instances that
 * embed the appearance value after it changed.
 */
static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}
3423 
3424 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3425 			  u16 len)
3426 {
3427 	struct mgmt_cp_set_appearance *cp = data;
3428 	u16 appearance;
3429 	int err;
3430 
3431 	bt_dev_dbg(hdev, "sock %p", sk);
3432 
3433 	if (!lmp_le_capable(hdev))
3434 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3435 				       MGMT_STATUS_NOT_SUPPORTED);
3436 
3437 	appearance = le16_to_cpu(cp->appearance);
3438 
3439 	hci_dev_lock(hdev);
3440 
3441 	if (hdev->appearance != appearance) {
3442 		hdev->appearance = appearance;
3443 
3444 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3445 			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3446 					   NULL);
3447 
3448 		ext_info_changed(hdev, sk);
3449 	}
3450 
3451 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3452 				0);
3453 
3454 	hci_dev_unlock(hdev);
3455 
3456 	return err;
3457 }
3458 
3459 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3460 				 void *data, u16 len)
3461 {
3462 	struct mgmt_rp_get_phy_configuration rp;
3463 
3464 	bt_dev_dbg(hdev, "sock %p", sk);
3465 
3466 	hci_dev_lock(hdev);
3467 
3468 	memset(&rp, 0, sizeof(rp));
3469 
3470 	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3471 	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3472 	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3473 
3474 	hci_dev_unlock(hdev);
3475 
3476 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3477 				 &rp, sizeof(rp));
3478 }
3479 
3480 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3481 {
3482 	struct mgmt_ev_phy_configuration_changed ev;
3483 
3484 	memset(&ev, 0, sizeof(ev));
3485 
3486 	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3487 
3488 	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3489 			  sizeof(ev), skip);
3490 }
3491 
/* Completion callback for set_default_phy_sync().  The queued work
 * itself always returns 0, so the real outcome has to be derived from
 * cmd->skb: missing skb or an ERR_PTR means the HCI command failed,
 * otherwise the first byte of the response is the HCI status.
 */
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		/* Notify other mgmt sockets of the new selection */
		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	/* The response skb is owned by this callback */
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}
3525 
/* Synchronous worker translating the mgmt LE PHY selection into an
 * HCI LE Set Default PHY command.  all_phys bits mean "no preference"
 * for TX (0x01) / RX (0x02) when the caller selected no PHY in that
 * direction.  Deliberately returns 0: the HCI result is stashed in
 * cmd->skb and evaluated by set_default_phy_complete().
 */
static int set_default_phy_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
	struct hci_cp_le_set_default_phy cp_phy;
	u32 selected_phys = __le32_to_cpu(cp->selected_phys);

	memset(&cp_phy, 0, sizeof(cp_phy));

	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	cmd->skb =  __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
				   sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);

	return 0;
}
3564 
/* MGMT_OP_SET_PHY_CONFIGURATION handler.  BR/EDR PHY selections are
 * applied directly by rewriting hdev->pkt_type; LE PHY selections are
 * deferred to set_default_phy_sync().  Rejects selections outside the
 * supported mask or ones that try to disable a non-configurable PHY.
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Non-configurable PHYs must always remain selected */
	unconfigure_phys = supported_phys & ~configurable_phys;

	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Map the BR/EDR selections onto pkt_type bits.  Basic-rate
	 * multi-slot bits are enable bits, while the EDR (2M/3M) bits
	 * use inverted handling - NOTE(review): presumably because
	 * those pkt_type bits are "shall not use" flags per the HCI
	 * spec; confirm against the Create Connection packet type
	 * definition.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If only BR/EDR bits changed there is no HCI command to send;
	 * reply immediately (and broadcast the change if any).
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3693 
3694 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3695 			    u16 len)
3696 {
3697 	int err = MGMT_STATUS_SUCCESS;
3698 	struct mgmt_cp_set_blocked_keys *keys = data;
3699 	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3700 				   sizeof(struct mgmt_blocked_key_info));
3701 	u16 key_count, expected_len;
3702 	int i;
3703 
3704 	bt_dev_dbg(hdev, "sock %p", sk);
3705 
3706 	key_count = __le16_to_cpu(keys->key_count);
3707 	if (key_count > max_key_count) {
3708 		bt_dev_err(hdev, "too big key_count value %u", key_count);
3709 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3710 				       MGMT_STATUS_INVALID_PARAMS);
3711 	}
3712 
3713 	expected_len = struct_size(keys, keys, key_count);
3714 	if (expected_len != len) {
3715 		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3716 			   expected_len, len);
3717 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3718 				       MGMT_STATUS_INVALID_PARAMS);
3719 	}
3720 
3721 	hci_dev_lock(hdev);
3722 
3723 	hci_blocked_keys_clear(hdev);
3724 
3725 	for (i = 0; i < keys->key_count; ++i) {
3726 		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3727 
3728 		if (!b) {
3729 			err = MGMT_STATUS_NO_RESOURCES;
3730 			break;
3731 		}
3732 
3733 		b->type = keys->keys[i].type;
3734 		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3735 		list_add_rcu(&b->list, &hdev->blocked_keys);
3736 	}
3737 	hci_dev_unlock(hdev);
3738 
3739 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3740 				err, NULL, 0);
3741 }
3742 
/* MGMT_OP_SET_WIDEBAND_SPEECH handler.  Only toggles the host-side
 * HCI_WIDEBAND_SPEECH_ENABLED flag; requires the controller to
 * declare support via HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, and the
 * setting may only be changed while the controller is powered off.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* While powered, only a request matching the current state is
	 * accepted (a no-op); actual changes are rejected.
	 */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	/* Broadcast New Settings only when the flag actually flipped */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3798 
/* MGMT_OP_READ_CONTROLLER_CAP handler: build a TLV list of security
 * capabilities (flags, max encryption key sizes, LE TX power range).
 * Worst case payload is the 2-byte cap_len header plus 3+4+4+4 bytes
 * of TLVs = 17 bytes, so buf[20] is sufficient.
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size)/* keep */;

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
3865 
/* Experimental-feature UUIDs.  The byte arrays below store each UUID
 * in reversed (little-endian) byte order relative to the canonical
 * string given in the comment above each table.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};
3897 
/* MGMT_OP_READ_EXP_FEATURES_INFO handler.  May be called with a NULL
 * hdev (non-controller index); each feature entry is 16 bytes of UUID
 * plus 4 bytes of flags (20 bytes), with BIT(0) of flags meaning
 * "currently enabled".  Also subscribes the socket to future
 * experimental-feature change events.
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	char buf[102];   /* Enough space for 5 features: 2 + 20 * 5 */
	struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
	u16 idx = 0;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* Debug feature is only reported on the non-controller index */
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	/* For LL privacy, BIT(1) additionally signals that enabling it
	 * changes the supported settings.
	 */
	if (hdev && ll_privacy_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_READ_EXP_FEATURES_INFO,
				 0, rp, sizeof(*rp) + (20 * idx));
}
3976 
3977 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
3978 					  struct sock *skip)
3979 {
3980 	struct mgmt_ev_exp_feature_changed ev;
3981 
3982 	memset(&ev, 0, sizeof(ev));
3983 	memcpy(ev.uuid, rpa_resolution_uuid, 16);
3984 	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
3985 
3986 	if (enabled && privacy_mode_capable(hdev))
3987 		set_bit(HCI_CONN_FLAG_DEVICE_PRIVACY, hdev->conn_flags);
3988 	else
3989 		clear_bit(HCI_CONN_FLAG_DEVICE_PRIVACY, hdev->conn_flags);
3990 
3991 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
3992 				  &ev, sizeof(ev),
3993 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3994 
3995 }
3996 
3997 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
3998 			       bool enabled, struct sock *skip)
3999 {
4000 	struct mgmt_ev_exp_feature_changed ev;
4001 
4002 	memset(&ev, 0, sizeof(ev));
4003 	memcpy(ev.uuid, uuid, 16);
4004 	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4005 
4006 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4007 				  &ev, sizeof(ev),
4008 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4009 }
4010 
/* Table-entry initializer pairing an experimental-feature UUID with
 * its MGMT_OP_SET_EXP_FEATURE handler.
 */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
4016 
/* The zero key uuid is special. Multiple exp features are set through it. */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	/* The reply always carries the zero UUID and cleared flags */
	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* Non-controller index: turn off the debug feature */
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	/* Controller index: clear LL privacy (only allowed while the
	 * controller is powered off).
	 */
	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed;

		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		if (changed)
			exp_feature_changed(hdev, rpa_resolution_uuid, false,
					    sk);
	}

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
4053 
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Toggle the kernel-wide Bluetooth debug feature.  Only valid on the
 * non-controller index with a single boolean parameter octet; emits
 * an exp-feature-changed event when the state actually flips.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
#endif
4100 
/* Enable/disable the LL privacy (RPA resolution) experimental
 * feature.  Requires a controller index, a powered-off controller and
 * a single boolean parameter octet.  Enabling also clears the
 * HCI_ADVERTISING flag since the advertising state has to be rebuilt
 * with LL privacy in place.
 */
static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}
4165 
/* Enable/disable the quality report experimental feature.  Prefers a
 * driver-provided set_quality_report() hook and falls back to the
 * AOSP vendor extension.  Runs under the hci_req sync lock because
 * the enable/disable helpers talk to the controller.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		/* Only track the flag after the backend succeeded */
		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4239 
/* Enable/disable the codec offload experimental feature.  Host-side
 * flag only; requires the driver to provide get_data_path_id().
 */
static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
				  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));

	/* Offloading is only possible when the driver can report data
	 * path IDs for the transport.
	 */
	if (!hdev->get_data_path_id) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	}

	bt_dev_info(hdev, "offload codecs enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);

	return err;
}
4297 
4298 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4299 					  struct mgmt_cp_set_exp_feature *cp,
4300 					  u16 data_len)
4301 {
4302 	bool val, changed;
4303 	int err;
4304 	struct mgmt_rp_set_exp_feature rp;
4305 
4306 	/* Command requires to use a valid controller index */
4307 	if (!hdev)
4308 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4309 				       MGMT_OP_SET_EXP_FEATURE,
4310 				       MGMT_STATUS_INVALID_INDEX);
4311 
4312 	/* Parameters are limited to a single octet */
4313 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4314 		return mgmt_cmd_status(sk, hdev->id,
4315 				       MGMT_OP_SET_EXP_FEATURE,
4316 				       MGMT_STATUS_INVALID_PARAMS);
4317 
4318 	/* Only boolean on/off is supported */
4319 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4320 		return mgmt_cmd_status(sk, hdev->id,
4321 				       MGMT_OP_SET_EXP_FEATURE,
4322 				       MGMT_STATUS_INVALID_PARAMS);
4323 
4324 	val = !!cp->param[0];
4325 	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4326 
4327 	if (!hci_dev_le_state_simultaneous(hdev)) {
4328 		return mgmt_cmd_status(sk, hdev->id,
4329 				       MGMT_OP_SET_EXP_FEATURE,
4330 				       MGMT_STATUS_NOT_SUPPORTED);
4331 	}
4332 
4333 	if (changed) {
4334 		if (val)
4335 			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4336 		else
4337 			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4338 	}
4339 
4340 	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4341 		    val, changed);
4342 
4343 	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4344 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4345 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4346 	err = mgmt_cmd_complete(sk, hdev->id,
4347 				MGMT_OP_SET_EXP_FEATURE, 0,
4348 				&rp, sizeof(rp));
4349 
4350 	if (changed)
4351 		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4352 
4353 	return err;
4354 }
4355 
/* Dispatch table mapping experimental feature UUIDs to their setter.
 * Looked up linearly by set_exp_feature(); terminated by a NULL entry.
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;		/* 16-byte feature UUID (NULL terminates) */
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	/* All-zero UUID: global "disable all experimental features" */
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
4373 
4374 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4375 			   void *data, u16 data_len)
4376 {
4377 	struct mgmt_cp_set_exp_feature *cp = data;
4378 	size_t i = 0;
4379 
4380 	bt_dev_dbg(hdev, "sock %p", sk);
4381 
4382 	for (i = 0; exp_features[i].uuid; i++) {
4383 		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4384 			return exp_features[i].set_func(sk, hdev, cp, data_len);
4385 	}
4386 
4387 	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4388 			       MGMT_OP_SET_EXP_FEATURE,
4389 			       MGMT_STATUS_NOT_SUPPORTED);
4390 }
4391 
4392 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4393 			    u16 data_len)
4394 {
4395 	struct mgmt_cp_get_device_flags *cp = data;
4396 	struct mgmt_rp_get_device_flags rp;
4397 	struct bdaddr_list_with_flags *br_params;
4398 	struct hci_conn_params *params;
4399 	u32 supported_flags;
4400 	u32 current_flags = 0;
4401 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4402 
4403 	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4404 		   &cp->addr.bdaddr, cp->addr.type);
4405 
4406 	hci_dev_lock(hdev);
4407 
4408 	bitmap_to_arr32(&supported_flags, hdev->conn_flags,
4409 			__HCI_CONN_NUM_FLAGS);
4410 
4411 	memset(&rp, 0, sizeof(rp));
4412 
4413 	if (cp->addr.type == BDADDR_BREDR) {
4414 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4415 							      &cp->addr.bdaddr,
4416 							      cp->addr.type);
4417 		if (!br_params)
4418 			goto done;
4419 
4420 		bitmap_to_arr32(&current_flags, br_params->flags,
4421 				__HCI_CONN_NUM_FLAGS);
4422 	} else {
4423 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4424 						le_addr_type(cp->addr.type));
4425 
4426 		if (!params)
4427 			goto done;
4428 
4429 		bitmap_to_arr32(&current_flags, params->flags,
4430 				__HCI_CONN_NUM_FLAGS);
4431 	}
4432 
4433 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4434 	rp.addr.type = cp->addr.type;
4435 	rp.supported_flags = cpu_to_le32(supported_flags);
4436 	rp.current_flags = cpu_to_le32(current_flags);
4437 
4438 	status = MGMT_STATUS_SUCCESS;
4439 
4440 done:
4441 	hci_dev_unlock(hdev);
4442 
4443 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
4444 				&rp, sizeof(rp));
4445 }
4446 
4447 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4448 				 bdaddr_t *bdaddr, u8 bdaddr_type,
4449 				 u32 supported_flags, u32 current_flags)
4450 {
4451 	struct mgmt_ev_device_flags_changed ev;
4452 
4453 	bacpy(&ev.addr.bdaddr, bdaddr);
4454 	ev.addr.type = bdaddr_type;
4455 	ev.supported_flags = cpu_to_le32(supported_flags);
4456 	ev.current_flags = cpu_to_le32(current_flags);
4457 
4458 	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
4459 }
4460 
4461 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4462 			    u16 len)
4463 {
4464 	struct mgmt_cp_set_device_flags *cp = data;
4465 	struct bdaddr_list_with_flags *br_params;
4466 	struct hci_conn_params *params;
4467 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4468 	u32 supported_flags;
4469 	u32 current_flags = __le32_to_cpu(cp->current_flags);
4470 
4471 	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4472 		   &cp->addr.bdaddr, cp->addr.type,
4473 		   __le32_to_cpu(current_flags));
4474 
4475 	bitmap_to_arr32(&supported_flags, hdev->conn_flags,
4476 			__HCI_CONN_NUM_FLAGS);
4477 
4478 	if ((supported_flags | current_flags) != supported_flags) {
4479 		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4480 			    current_flags, supported_flags);
4481 		goto done;
4482 	}
4483 
4484 	hci_dev_lock(hdev);
4485 
4486 	if (cp->addr.type == BDADDR_BREDR) {
4487 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4488 							      &cp->addr.bdaddr,
4489 							      cp->addr.type);
4490 
4491 		if (br_params) {
4492 			bitmap_from_u64(br_params->flags, current_flags);
4493 			status = MGMT_STATUS_SUCCESS;
4494 		} else {
4495 			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4496 				    &cp->addr.bdaddr, cp->addr.type);
4497 		}
4498 	} else {
4499 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4500 						le_addr_type(cp->addr.type));
4501 		if (params) {
4502 			bitmap_from_u64(params->flags, current_flags);
4503 			status = MGMT_STATUS_SUCCESS;
4504 
4505 			/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
4506 			 * has been set.
4507 			 */
4508 			if (test_bit(HCI_CONN_FLAG_DEVICE_PRIVACY,
4509 				     params->flags))
4510 				hci_update_passive_scan(hdev);
4511 		} else {
4512 			bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4513 				    &cp->addr.bdaddr,
4514 				    le_addr_type(cp->addr.type));
4515 		}
4516 	}
4517 
4518 done:
4519 	hci_dev_unlock(hdev);
4520 
4521 	if (status == MGMT_STATUS_SUCCESS)
4522 		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4523 				     supported_flags, current_flags);
4524 
4525 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4526 				 &cp->addr, sizeof(cp->addr));
4527 }
4528 
4529 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4530 				   u16 handle)
4531 {
4532 	struct mgmt_ev_adv_monitor_added ev;
4533 
4534 	ev.monitor_handle = cpu_to_le16(handle);
4535 
4536 	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
4537 }
4538 
/* Broadcast MGMT_EV_ADV_MONITOR_REMOVED for @handle.
 *
 * If the removal was requested over mgmt for a specific (non-zero)
 * monitor handle, the requesting socket is skipped: it receives the
 * result as a command reply instead of through this event.
 */
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk_skip = NULL;
	struct mgmt_cp_remove_adv_monitor *cp;

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cmd) {
		cp = cmd->param;

		/* A zero handle means "remove all"; then nobody is skipped */
		if (cp->monitor_handle)
			sk_skip = cmd->sk;
	}

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
}
4558 
4559 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
4560 				 void *data, u16 len)
4561 {
4562 	struct adv_monitor *monitor = NULL;
4563 	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
4564 	int handle, err;
4565 	size_t rp_size = 0;
4566 	__u32 supported = 0;
4567 	__u32 enabled = 0;
4568 	__u16 num_handles = 0;
4569 	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
4570 
4571 	BT_DBG("request for %s", hdev->name);
4572 
4573 	hci_dev_lock(hdev);
4574 
4575 	if (msft_monitor_supported(hdev))
4576 		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
4577 
4578 	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
4579 		handles[num_handles++] = monitor->handle;
4580 
4581 	hci_dev_unlock(hdev);
4582 
4583 	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
4584 	rp = kmalloc(rp_size, GFP_KERNEL);
4585 	if (!rp)
4586 		return -ENOMEM;
4587 
4588 	/* All supported features are currently enabled */
4589 	enabled = supported;
4590 
4591 	rp->supported_features = cpu_to_le32(supported);
4592 	rp->enabled_features = cpu_to_le32(enabled);
4593 	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
4594 	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
4595 	rp->num_handles = cpu_to_le16(num_handles);
4596 	if (num_handles)
4597 		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
4598 
4599 	err = mgmt_cmd_complete(sk, hdev->id,
4600 				MGMT_OP_READ_ADV_MONITOR_FEATURES,
4601 				MGMT_STATUS_SUCCESS, rp, rp_size);
4602 
4603 	kfree(rp);
4604 
4605 	return err;
4606 }
4607 
4608 int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
4609 {
4610 	struct mgmt_rp_add_adv_patterns_monitor rp;
4611 	struct mgmt_pending_cmd *cmd;
4612 	struct adv_monitor *monitor;
4613 	int err = 0;
4614 
4615 	hci_dev_lock(hdev);
4616 
4617 	cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev);
4618 	if (!cmd) {
4619 		cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev);
4620 		if (!cmd)
4621 			goto done;
4622 	}
4623 
4624 	monitor = cmd->user_data;
4625 	rp.monitor_handle = cpu_to_le16(monitor->handle);
4626 
4627 	if (!status) {
4628 		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
4629 		hdev->adv_monitors_cnt++;
4630 		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
4631 			monitor->state = ADV_MONITOR_STATE_REGISTERED;
4632 		hci_update_passive_scan(hdev);
4633 	}
4634 
4635 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4636 				mgmt_status(status), &rp, sizeof(rp));
4637 	mgmt_pending_remove(cmd);
4638 
4639 done:
4640 	hci_dev_unlock(hdev);
4641 	bt_dev_dbg(hdev, "add monitor %d complete, status %u",
4642 		   rp.monitor_handle, status);
4643 
4644 	return err;
4645 }
4646 
/* Common tail for ADD_ADV_PATTERNS_MONITOR and its _RSSI variant.
 *
 * Takes ownership of monitor @m (may be NULL when the caller failed to
 * allocate it): on any failure path it is released via
 * hci_free_adv_monitor() and a command status is returned.  @status is
 * the caller's parsing result; non-zero fails immediately.  When
 * registration needs the controller, 0 is returned and the reply is sent
 * later from mgmt_add_adv_patterns_monitor_complete(); otherwise the
 * command is answered here.
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd;
	int err;
	bool pending;

	hci_dev_lock(hdev);

	if (status)
		goto unlock;

	/* Only one monitor or LE-state-changing operation at a time */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	pending = hci_add_adv_monitor(hdev, m, &err);
	if (err) {
		/* Map the kernel error to the closest mgmt status */
		if (err == -ENOSPC || err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else if (err == -EINVAL)
			status = MGMT_STATUS_INVALID_PARAMS;
		else
			status = MGMT_STATUS_FAILED;

		mgmt_pending_remove(cmd);
		goto unlock;
	}

	/* Monitor was registered without contacting the controller:
	 * reply immediately instead of waiting for a completion callback.
	 */
	if (!pending) {
		mgmt_pending_remove(cmd);
		rp.monitor_handle = cpu_to_le16(m->handle);
		mgmt_adv_monitor_added(sk, hdev, m->handle);
		m->state = ADV_MONITOR_STATE_REGISTERED;
		hdev->adv_monitors_cnt++;

		hci_dev_unlock(hdev);
		return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS,
					 &rp, sizeof(rp));
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
4710 
4711 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4712 				   struct mgmt_adv_rssi_thresholds *rssi)
4713 {
4714 	if (rssi) {
4715 		m->rssi.low_threshold = rssi->low_threshold;
4716 		m->rssi.low_threshold_timeout =
4717 		    __le16_to_cpu(rssi->low_threshold_timeout);
4718 		m->rssi.high_threshold = rssi->high_threshold;
4719 		m->rssi.high_threshold_timeout =
4720 		    __le16_to_cpu(rssi->high_threshold_timeout);
4721 		m->rssi.sampling_period = rssi->sampling_period;
4722 	} else {
4723 		/* Default values. These numbers are the least constricting
4724 		 * parameters for MSFT API to work, so it behaves as if there
4725 		 * are no rssi parameter to consider. May need to be changed
4726 		 * if other API are to be supported.
4727 		 */
4728 		m->rssi.low_threshold = -127;
4729 		m->rssi.low_threshold_timeout = 60;
4730 		m->rssi.high_threshold = -127;
4731 		m->rssi.high_threshold_timeout = 0;
4732 		m->rssi.sampling_period = 0;
4733 	}
4734 }
4735 
4736 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
4737 				    struct mgmt_adv_pattern *patterns)
4738 {
4739 	u8 offset = 0, length = 0;
4740 	struct adv_pattern *p = NULL;
4741 	int i;
4742 
4743 	for (i = 0; i < pattern_count; i++) {
4744 		offset = patterns[i].offset;
4745 		length = patterns[i].length;
4746 		if (offset >= HCI_MAX_AD_LENGTH ||
4747 		    length > HCI_MAX_AD_LENGTH ||
4748 		    (offset + length) > HCI_MAX_AD_LENGTH)
4749 			return MGMT_STATUS_INVALID_PARAMS;
4750 
4751 		p = kmalloc(sizeof(*p), GFP_KERNEL);
4752 		if (!p)
4753 			return MGMT_STATUS_NO_RESOURCES;
4754 
4755 		p->ad_type = patterns[i].ad_type;
4756 		p->offset = patterns[i].offset;
4757 		p->length = patterns[i].length;
4758 		memcpy(p->value, patterns[i].value, p->length);
4759 
4760 		INIT_LIST_HEAD(&p->list);
4761 		list_add(&p->list, &m->patterns);
4762 	}
4763 
4764 	return MGMT_STATUS_SUCCESS;
4765 }
4766 
/* Handle MGMT_OP_ADD_ADV_PATTERNS_MONITOR (no RSSI thresholds).
 *
 * Validates the variable-length pattern list, allocates the monitor with
 * default RSSI parameters and hands it to __add_adv_patterns_monitor(),
 * which takes ownership of @m (including the NULL/error cases).
 */
static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* At least one pattern must follow the fixed header */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	/* Length must match the advertised pattern count exactly */
	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	/* NULL rssi selects the default (least constricting) thresholds */
	parse_adv_monitor_rssi(m, NULL);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
}
4803 
/* Handle MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI.
 *
 * Same as add_adv_patterns_monitor() but the command carries explicit
 * RSSI thresholds.  Ownership of @m passes to
 * __add_adv_patterns_monitor() in all cases.
 */
static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
					 void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* At least one pattern must follow the fixed header */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	/* Length must match the advertised pattern count exactly */
	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, &cp->rssi);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
}
4840 
4841 int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
4842 {
4843 	struct mgmt_rp_remove_adv_monitor rp;
4844 	struct mgmt_cp_remove_adv_monitor *cp;
4845 	struct mgmt_pending_cmd *cmd;
4846 	int err = 0;
4847 
4848 	hci_dev_lock(hdev);
4849 
4850 	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4851 	if (!cmd)
4852 		goto done;
4853 
4854 	cp = cmd->param;
4855 	rp.monitor_handle = cp->monitor_handle;
4856 
4857 	if (!status)
4858 		hci_update_passive_scan(hdev);
4859 
4860 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4861 				mgmt_status(status), &rp, sizeof(rp));
4862 	mgmt_pending_remove(cmd);
4863 
4864 done:
4865 	hci_dev_unlock(hdev);
4866 	bt_dev_dbg(hdev, "remove monitor %d complete, status %u",
4867 		   rp.monitor_handle, status);
4868 
4869 	return err;
4870 }
4871 
/* Handle MGMT_OP_REMOVE_ADV_MONITOR.
 *
 * A zero monitor handle removes all monitors.  When the removal needs
 * the controller, 0 is returned and the reply is sent later from
 * mgmt_remove_adv_monitor_complete(); otherwise the command is answered
 * here.
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_remove_adv_monitor *cp = data;
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd;
	u16 handle = __le16_to_cpu(cp->monitor_handle);
	int err, status;
	bool pending;

	BT_DBG("request for %s", hdev->name);
	rp.monitor_handle = cp->monitor_handle;

	hci_dev_lock(hdev);

	/* Only one monitor or LE-state-changing operation at a time */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	if (handle)
		pending = hci_remove_single_adv_monitor(hdev, handle, &err);
	else
		pending = hci_remove_all_adv_monitor(hdev, &err);

	if (err) {
		mgmt_pending_remove(cmd);

		/* -ENOENT: no monitor with that handle exists */
		if (err == -ENOENT)
			status = MGMT_STATUS_INVALID_INDEX;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	/* monitor can be removed without forwarding request to controller */
	if (!pending) {
		mgmt_pending_remove(cmd);
		hci_dev_unlock(hdev);

		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_ADV_MONITOR,
					 MGMT_STATUS_SUCCESS,
					 &rp, sizeof(rp));
	}

	hci_dev_unlock(hdev);
	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
4936 
/* Completion callback for READ_LOCAL_OOB_DATA.
 *
 * Translates the controller reply skb (stored on the pending command by
 * read_local_oob_data_sync()) into a mgmt reply.  The reply carries only
 * the P-192 hash/rand when Secure Connections is not enabled, the full
 * P-192 + P-256 set otherwise.  Always frees the skb and the command.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* No transport error: derive the status from the skb itself
	 * (missing, error-encoded, or carrying an HCI status byte).
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		/* Legacy reply: P-192 values only */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Trim the unused P-256 fields from the reply */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended reply: both P-192 and P-256 values */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
5003 
5004 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5005 {
5006 	struct mgmt_pending_cmd *cmd = data;
5007 
5008 	if (bredr_sc_enabled(hdev))
5009 		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5010 	else
5011 		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5012 
5013 	if (IS_ERR(cmd->skb))
5014 		return PTR_ERR(cmd->skb);
5015 	else
5016 		return 0;
5017 }
5018 
/* Handle MGMT_OP_READ_LOCAL_OOB_DATA.
 *
 * Requires a powered, SSP-capable controller and no identical command in
 * flight.  Queues read_local_oob_data_sync(); the reply is sent from
 * read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	/* Covers both allocation failure and a queueing failure; the
	 * command (if allocated) must be freed since no callback will run.
	 */
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5066 
/* Handle MGMT_OP_ADD_REMOTE_OOB_DATA.
 *
 * Accepts two wire formats distinguished by length: the legacy format
 * with only P-192 hash/rand (BR/EDR only), and the extended format with
 * both P-192 and P-256 values.  All-zero hash/rand pairs disable the
 * corresponding OOB data set.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy format: P-192 only, restricted to BR/EDR */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended format: P-192 and P-256 values */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5174 
5175 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5176 				  void *data, u16 len)
5177 {
5178 	struct mgmt_cp_remove_remote_oob_data *cp = data;
5179 	u8 status;
5180 	int err;
5181 
5182 	bt_dev_dbg(hdev, "sock %p", sk);
5183 
5184 	if (cp->addr.type != BDADDR_BREDR)
5185 		return mgmt_cmd_complete(sk, hdev->id,
5186 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5187 					 MGMT_STATUS_INVALID_PARAMS,
5188 					 &cp->addr, sizeof(cp->addr));
5189 
5190 	hci_dev_lock(hdev);
5191 
5192 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5193 		hci_remote_oob_data_clear(hdev);
5194 		status = MGMT_STATUS_SUCCESS;
5195 		goto done;
5196 	}
5197 
5198 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5199 	if (err < 0)
5200 		status = MGMT_STATUS_INVALID_PARAMS;
5201 	else
5202 		status = MGMT_STATUS_SUCCESS;
5203 
5204 done:
5205 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5206 				status, &cp->addr, sizeof(cp->addr));
5207 
5208 	hci_dev_unlock(hdev);
5209 	return err;
5210 }
5211 
5212 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5213 {
5214 	struct mgmt_pending_cmd *cmd;
5215 
5216 	bt_dev_dbg(hdev, "status %u", status);
5217 
5218 	hci_dev_lock(hdev);
5219 
5220 	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5221 	if (!cmd)
5222 		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5223 
5224 	if (!cmd)
5225 		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5226 
5227 	if (cmd) {
5228 		cmd->cmd_complete(cmd, mgmt_status(status));
5229 		mgmt_pending_remove(cmd);
5230 	}
5231 
5232 	hci_dev_unlock(hdev);
5233 }
5234 
5235 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5236 				    uint8_t *mgmt_status)
5237 {
5238 	switch (type) {
5239 	case DISCOV_TYPE_LE:
5240 		*mgmt_status = mgmt_le_support(hdev);
5241 		if (*mgmt_status)
5242 			return false;
5243 		break;
5244 	case DISCOV_TYPE_INTERLEAVED:
5245 		*mgmt_status = mgmt_le_support(hdev);
5246 		if (*mgmt_status)
5247 			return false;
5248 		fallthrough;
5249 	case DISCOV_TYPE_BREDR:
5250 		*mgmt_status = mgmt_bredr_support(hdev);
5251 		if (*mgmt_status)
5252 			return false;
5253 		break;
5254 	default:
5255 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5256 		return false;
5257 	}
5258 
5259 	return true;
5260 }
5261 
5262 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5263 {
5264 	struct mgmt_pending_cmd *cmd = data;
5265 
5266 	bt_dev_dbg(hdev, "err %d", err);
5267 
5268 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5269 			  cmd->param, 1);
5270 	mgmt_pending_free(cmd);
5271 
5272 	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
5273 				DISCOVERY_FINDING);
5274 }
5275 
/* hci_cmd_sync work: run the actual discovery start procedure */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}
5280 
/* Common implementation for the three start-discovery mgmt commands.
 *
 * Validates power state, discovery state and discovery type, then queues
 * start_discovery_sync() and moves the state machine to STARTING.  The
 * final reply is sent from start_discovery_complete().
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Discovery must be idle and not overridden by periodic inquiry */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_new(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5351 
/* MGMT_OP_START_DISCOVERY handler */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
5358 
/* MGMT_OP_START_LIMITED_DISCOVERY handler */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
5366 
5367 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5368 				   void *data, u16 len)
5369 {
5370 	struct mgmt_cp_start_service_discovery *cp = data;
5371 	struct mgmt_pending_cmd *cmd;
5372 	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5373 	u16 uuid_count, expected_len;
5374 	u8 status;
5375 	int err;
5376 
5377 	bt_dev_dbg(hdev, "sock %p", sk);
5378 
5379 	hci_dev_lock(hdev);
5380 
5381 	if (!hdev_is_powered(hdev)) {
5382 		err = mgmt_cmd_complete(sk, hdev->id,
5383 					MGMT_OP_START_SERVICE_DISCOVERY,
5384 					MGMT_STATUS_NOT_POWERED,
5385 					&cp->type, sizeof(cp->type));
5386 		goto failed;
5387 	}
5388 
5389 	if (hdev->discovery.state != DISCOVERY_STOPPED ||
5390 	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5391 		err = mgmt_cmd_complete(sk, hdev->id,
5392 					MGMT_OP_START_SERVICE_DISCOVERY,
5393 					MGMT_STATUS_BUSY, &cp->type,
5394 					sizeof(cp->type));
5395 		goto failed;
5396 	}
5397 
5398 	if (hdev->discovery_paused) {
5399 		err = mgmt_cmd_complete(sk, hdev->id,
5400 					MGMT_OP_START_SERVICE_DISCOVERY,
5401 					MGMT_STATUS_BUSY, &cp->type,
5402 					sizeof(cp->type));
5403 		goto failed;
5404 	}
5405 
5406 	uuid_count = __le16_to_cpu(cp->uuid_count);
5407 	if (uuid_count > max_uuid_count) {
5408 		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
5409 			   uuid_count);
5410 		err = mgmt_cmd_complete(sk, hdev->id,
5411 					MGMT_OP_START_SERVICE_DISCOVERY,
5412 					MGMT_STATUS_INVALID_PARAMS, &cp->type,
5413 					sizeof(cp->type));
5414 		goto failed;
5415 	}
5416 
5417 	expected_len = sizeof(*cp) + uuid_count * 16;
5418 	if (expected_len != len) {
5419 		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
5420 			   expected_len, len);
5421 		err = mgmt_cmd_complete(sk, hdev->id,
5422 					MGMT_OP_START_SERVICE_DISCOVERY,
5423 					MGMT_STATUS_INVALID_PARAMS, &cp->type,
5424 					sizeof(cp->type));
5425 		goto failed;
5426 	}
5427 
5428 	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5429 		err = mgmt_cmd_complete(sk, hdev->id,
5430 					MGMT_OP_START_SERVICE_DISCOVERY,
5431 					status, &cp->type, sizeof(cp->type));
5432 		goto failed;
5433 	}
5434 
5435 	cmd = mgmt_pending_new(sk, MGMT_OP_START_SERVICE_DISCOVERY,
5436 			       hdev, data, len);
5437 	if (!cmd) {
5438 		err = -ENOMEM;
5439 		goto failed;
5440 	}
5441 
5442 	/* Clear the discovery filter first to free any previously
5443 	 * allocated memory for the UUID list.
5444 	 */
5445 	hci_discovery_filter_clear(hdev);
5446 
5447 	hdev->discovery.result_filtering = true;
5448 	hdev->discovery.type = cp->type;
5449 	hdev->discovery.rssi = cp->rssi;
5450 	hdev->discovery.uuid_count = uuid_count;
5451 
5452 	if (uuid_count > 0) {
5453 		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
5454 						GFP_KERNEL);
5455 		if (!hdev->discovery.uuids) {
5456 			err = mgmt_cmd_complete(sk, hdev->id,
5457 						MGMT_OP_START_SERVICE_DISCOVERY,
5458 						MGMT_STATUS_FAILED,
5459 						&cp->type, sizeof(cp->type));
5460 			mgmt_pending_remove(cmd);
5461 			goto failed;
5462 		}
5463 	}
5464 
5465 	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5466 				 start_discovery_complete);
5467 	if (err < 0) {
5468 		mgmt_pending_free(cmd);
5469 		goto failed;
5470 	}
5471 
5472 	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5473 
5474 failed:
5475 	hci_dev_unlock(hdev);
5476 	return err;
5477 }
5478 
/* Notify a pending Stop Discovery command (if any is on the pending
 * list) that the stop procedure finished with the given HCI status.
 */
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
5495 
/* Completion handler for a queued stop_discovery_sync() request */
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Respond with the discovery type, i.e. the first byte of the
	 * command parameters.
	 */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_free(cmd);

	/* Only leave DISCOVERY_STOPPING if the stop actually succeeded */
	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
5509 
/* cmd_sync callback: stop the running discovery procedure */
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_stop_discovery_sync(hdev);
}
5514 
/* MGMT_OP_STOP_DISCOVERY handler: validates that a discovery of the
 * requested type is active and queues the stop procedure; the final
 * reply is sent from stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The type must match the one the discovery was started with */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* Not added to the pending list; freed by the completion handler */
	cmd = mgmt_pending_new(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5559 
5560 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
5561 			u16 len)
5562 {
5563 	struct mgmt_cp_confirm_name *cp = data;
5564 	struct inquiry_entry *e;
5565 	int err;
5566 
5567 	bt_dev_dbg(hdev, "sock %p", sk);
5568 
5569 	hci_dev_lock(hdev);
5570 
5571 	if (!hci_discovery_active(hdev)) {
5572 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5573 					MGMT_STATUS_FAILED, &cp->addr,
5574 					sizeof(cp->addr));
5575 		goto failed;
5576 	}
5577 
5578 	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
5579 	if (!e) {
5580 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5581 					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
5582 					sizeof(cp->addr));
5583 		goto failed;
5584 	}
5585 
5586 	if (cp->name_known) {
5587 		e->name_state = NAME_KNOWN;
5588 		list_del(&e->list);
5589 	} else {
5590 		e->name_state = NAME_NEEDED;
5591 		hci_inquiry_cache_update_resolve(hdev, e);
5592 	}
5593 
5594 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
5595 				&cp->addr, sizeof(cp->addr));
5596 
5597 failed:
5598 	hci_dev_unlock(hdev);
5599 	return err;
5600 }
5601 
5602 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
5603 			u16 len)
5604 {
5605 	struct mgmt_cp_block_device *cp = data;
5606 	u8 status;
5607 	int err;
5608 
5609 	bt_dev_dbg(hdev, "sock %p", sk);
5610 
5611 	if (!bdaddr_type_is_valid(cp->addr.type))
5612 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
5613 					 MGMT_STATUS_INVALID_PARAMS,
5614 					 &cp->addr, sizeof(cp->addr));
5615 
5616 	hci_dev_lock(hdev);
5617 
5618 	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
5619 				  cp->addr.type);
5620 	if (err < 0) {
5621 		status = MGMT_STATUS_FAILED;
5622 		goto done;
5623 	}
5624 
5625 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5626 		   sk);
5627 	status = MGMT_STATUS_SUCCESS;
5628 
5629 done:
5630 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
5631 				&cp->addr, sizeof(cp->addr));
5632 
5633 	hci_dev_unlock(hdev);
5634 
5635 	return err;
5636 }
5637 
5638 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5639 			  u16 len)
5640 {
5641 	struct mgmt_cp_unblock_device *cp = data;
5642 	u8 status;
5643 	int err;
5644 
5645 	bt_dev_dbg(hdev, "sock %p", sk);
5646 
5647 	if (!bdaddr_type_is_valid(cp->addr.type))
5648 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5649 					 MGMT_STATUS_INVALID_PARAMS,
5650 					 &cp->addr, sizeof(cp->addr));
5651 
5652 	hci_dev_lock(hdev);
5653 
5654 	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
5655 				  cp->addr.type);
5656 	if (err < 0) {
5657 		status = MGMT_STATUS_INVALID_PARAMS;
5658 		goto done;
5659 	}
5660 
5661 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5662 		   sk);
5663 	status = MGMT_STATUS_SUCCESS;
5664 
5665 done:
5666 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5667 				&cp->addr, sizeof(cp->addr));
5668 
5669 	hci_dev_unlock(hdev);
5670 
5671 	return err;
5672 }
5673 
/* cmd_sync callback: regenerate the EIR data so it reflects the
 * current Device ID values.
 */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}
5678 
5679 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5680 			 u16 len)
5681 {
5682 	struct mgmt_cp_set_device_id *cp = data;
5683 	int err;
5684 	__u16 source;
5685 
5686 	bt_dev_dbg(hdev, "sock %p", sk);
5687 
5688 	source = __le16_to_cpu(cp->source);
5689 
5690 	if (source > 0x0002)
5691 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5692 				       MGMT_STATUS_INVALID_PARAMS);
5693 
5694 	hci_dev_lock(hdev);
5695 
5696 	hdev->devid_source = source;
5697 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5698 	hdev->devid_product = __le16_to_cpu(cp->product);
5699 	hdev->devid_version = __le16_to_cpu(cp->version);
5700 
5701 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
5702 				NULL, 0);
5703 
5704 	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
5705 
5706 	hci_dev_unlock(hdev);
5707 
5708 	return err;
5709 }
5710 
/* Log the outcome of re-enabling instance advertising; there is
 * nothing to roll back on failure.
 */
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (!err) {
		bt_dev_dbg(hdev, "status %d", err);
		return;
	}

	bt_dev_err(hdev, "failed to re-configure advertising %d", err);
}
5718 
/* Completion handler for Set Advertising: sync the HCI_ADVERTISING
 * flag with the controller state, answer all pending Set Advertising
 * commands and, when the setting was just disabled, resume instance
 * based advertising if any instances are configured.
 */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	/* settings_rsp leaves a socket reference in match.sk; drop it */
	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		/* No current instance; fall back to the first configured one */
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}
5766 
/* cmd_sync callback for Set Advertising: program the controller for
 * the requested mode (0x00 = off, 0x01 = on, 0x02 = connectable).
 */
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}
5800 
/* MGMT_OP_SET_ADVERTISING handler.
 *
 * When no HCI traffic is needed (powered off, no effective change, LE
 * connections exist, or an active LE scan is running) the flags are
 * toggled directly and the reply is sent immediately; otherwise the
 * change is queued on cmd_sync and completed asynchronously.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	/* 0x02 means advertising with the connectable setting forced on */
	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Broadcast New Settings only when a flag actually flipped */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5884 
5885 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
5886 			      void *data, u16 len)
5887 {
5888 	struct mgmt_cp_set_static_address *cp = data;
5889 	int err;
5890 
5891 	bt_dev_dbg(hdev, "sock %p", sk);
5892 
5893 	if (!lmp_le_capable(hdev))
5894 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5895 				       MGMT_STATUS_NOT_SUPPORTED);
5896 
5897 	if (hdev_is_powered(hdev))
5898 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5899 				       MGMT_STATUS_REJECTED);
5900 
5901 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
5902 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
5903 			return mgmt_cmd_status(sk, hdev->id,
5904 					       MGMT_OP_SET_STATIC_ADDRESS,
5905 					       MGMT_STATUS_INVALID_PARAMS);
5906 
5907 		/* Two most significant bits shall be set */
5908 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
5909 			return mgmt_cmd_status(sk, hdev->id,
5910 					       MGMT_OP_SET_STATIC_ADDRESS,
5911 					       MGMT_STATUS_INVALID_PARAMS);
5912 	}
5913 
5914 	hci_dev_lock(hdev);
5915 
5916 	bacpy(&hdev->static_addr, &cp->bdaddr);
5917 
5918 	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
5919 	if (err < 0)
5920 		goto unlock;
5921 
5922 	err = new_settings(hdev, sk);
5923 
5924 unlock:
5925 	hci_dev_unlock(hdev);
5926 	return err;
5927 }
5928 
5929 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
5930 			   void *data, u16 len)
5931 {
5932 	struct mgmt_cp_set_scan_params *cp = data;
5933 	__u16 interval, window;
5934 	int err;
5935 
5936 	bt_dev_dbg(hdev, "sock %p", sk);
5937 
5938 	if (!lmp_le_capable(hdev))
5939 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5940 				       MGMT_STATUS_NOT_SUPPORTED);
5941 
5942 	interval = __le16_to_cpu(cp->interval);
5943 
5944 	if (interval < 0x0004 || interval > 0x4000)
5945 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5946 				       MGMT_STATUS_INVALID_PARAMS);
5947 
5948 	window = __le16_to_cpu(cp->window);
5949 
5950 	if (window < 0x0004 || window > 0x4000)
5951 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5952 				       MGMT_STATUS_INVALID_PARAMS);
5953 
5954 	if (window > interval)
5955 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5956 				       MGMT_STATUS_INVALID_PARAMS);
5957 
5958 	hci_dev_lock(hdev);
5959 
5960 	hdev->le_scan_interval = interval;
5961 	hdev->le_scan_window = window;
5962 
5963 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
5964 				NULL, 0);
5965 
5966 	/* If background scan is running, restart it so new parameters are
5967 	 * loaded.
5968 	 */
5969 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5970 	    hdev->discovery.state == DISCOVERY_STOPPED)
5971 		hci_update_passive_scan(hdev);
5972 
5973 	hci_dev_unlock(hdev);
5974 
5975 	return err;
5976 }
5977 
/* Completion handler for Set Fast Connectable: on success sync the
 * HCI_FAST_CONNECTABLE flag and notify, on failure report the error.
 */
static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(err));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}
6001 
/* cmd_sync callback: apply the requested fast connectable setting */
static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	return hci_write_fast_connectable_sync(hdev, cp->val);
}
6009 
/* MGMT_OP_SET_FAST_CONNECTABLE handler.
 *
 * Requires BR/EDR and at least Bluetooth 1.2. When powered off only
 * the flag is toggled; otherwise the page scan change is queued on
 * cmd_sync and completed in fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested; just acknowledge the current setting */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	/* Not added to the pending list; freed by the completion handler */
	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6065 
/* Completion handler for Set BR/EDR: undo the optimistically set
 * HCI_BREDR_ENABLED flag on failure, otherwise confirm and notify.
 */
static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}
6088 
6089 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6090 {
6091 	int status;
6092 
6093 	status = hci_write_fast_connectable_sync(hdev, false);
6094 
6095 	if (!status)
6096 		status = hci_update_scan_sync(hdev);
6097 
6098 	/* Since only the advertising data flags will change, there
6099 	 * is no need to update the scan response data.
6100 	 */
6101 	if (!status)
6102 		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6103 
6104 	return status;
6105 }
6106 
/* MGMT_OP_SET_BREDR handler: enable or disable BR/EDR on a dual-mode
 * controller. Disabling is only possible while powered off; several
 * re-enable combinations are rejected (see the comment below).
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested; just acknowledge the current setting */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* BR/EDR-only settings make no sense without BR/EDR */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	/* Not added to the pending list; freed by set_bredr_complete() */
	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6207 
6208 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6209 {
6210 	struct mgmt_pending_cmd *cmd = data;
6211 	struct mgmt_mode *cp;
6212 
6213 	bt_dev_dbg(hdev, "err %d", err);
6214 
6215 	if (err) {
6216 		u8 mgmt_err = mgmt_status(err);
6217 
6218 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6219 		goto done;
6220 	}
6221 
6222 	cp = cmd->param;
6223 
6224 	switch (cp->val) {
6225 	case 0x00:
6226 		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6227 		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6228 		break;
6229 	case 0x01:
6230 		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6231 		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6232 		break;
6233 	case 0x02:
6234 		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6235 		hci_dev_set_flag(hdev, HCI_SC_ONLY);
6236 		break;
6237 	}
6238 
6239 	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6240 	new_settings(hdev, cmd->sk);
6241 
6242 done:
6243 	mgmt_pending_free(cmd);
6244 }
6245 
/* cmd_sync callback for Set Secure Connections */
static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	/* Force write of val */
	hci_dev_set_flag(hdev, HCI_SC_ENABLED);

	return hci_write_sc_support_sync(hdev, val);
}
6257 
/* MGMT_OP_SET_SECURE_CONN handler: 0x00 = off, 0x01 = enabled,
 * 0x02 = SC-only mode.
 *
 * When the controller cannot act on the change (powered off, no SC
 * support, or BR/EDR disabled) only the flags are toggled; otherwise
 * the change goes through cmd_sync and set_secure_conn_complete().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On BR/EDR, Secure Connections requires SSP to be enabled */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* No change requested; just acknowledge the current setting */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	/* Not added to the pending list; freed by the completion handler */
	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6338 
/* MGMT_OP_SET_DEBUG_KEYS handler: 0x00 = discard debug keys,
 * 0x01 = keep them, 0x02 = additionally put the controller into SSP
 * debug mode so it generates debug keys itself.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Toggle controller SSP debug mode only when the use flag
	 * actually changed and SSP is active.
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6385 
/* MGMT_OP_SET_PRIVACY handler: 0x00 = off, 0x01 = privacy (RPA),
 * 0x02 = limited privacy. Installs or clears the local IRK; only
 * allowed while the adapter is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force a fresh RPA to be generated on next use */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6442 
6443 static bool irk_is_valid(struct mgmt_irk_info *irk)
6444 {
6445 	switch (irk->addr.type) {
6446 	case BDADDR_LE_PUBLIC:
6447 		return true;
6448 
6449 	case BDADDR_LE_RANDOM:
6450 		/* Two most significant bits shall be set */
6451 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6452 			return false;
6453 		return true;
6454 	}
6455 
6456 	return false;
6457 }
6458 
/* MGMT_OP_LOAD_IRKS handler.
 *
 * Replaces the device's entire IRK store with the list supplied by
 * user space: the existing IRKs are cleared and each valid,
 * non-blocked entry is added. Also sets HCI_RPA_RESOLVING, since a
 * user space that loads IRKs is expected to handle RPA resolution.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps struct_size() below within u16 range */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared count exactly */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate every entry before touching the existing IRK list so
	 * a bad entry rejects the whole command without side effects.
	 */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		/* Administratively blocked keys are skipped, not errors */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6529 
6530 static bool ltk_is_valid(struct mgmt_ltk_info *key)
6531 {
6532 	if (key->initiator != 0x00 && key->initiator != 0x01)
6533 		return false;
6534 
6535 	switch (key->addr.type) {
6536 	case BDADDR_LE_PUBLIC:
6537 		return true;
6538 
6539 	case BDADDR_LE_RANDOM:
6540 		/* Two most significant bits shall be set */
6541 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6542 			return false;
6543 		return true;
6544 	}
6545 
6546 	return false;
6547 }
6548 
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler.
 *
 * Replaces the device's entire LTK store with the supplied list.
 * All entries are validated up front; blocked keys and P-256 debug
 * keys are silently skipped during the load.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound that keeps struct_size() below within u16 range */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Validate everything before clearing the existing key store */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		/* Administratively blocked keys are skipped, not errors */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Map the mgmt key type onto SMP key type/authentication */
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* Falling into default: debug keys are never
			 * stored, as are any unknown key types.
			 */
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6644 
6645 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
6646 {
6647 	struct mgmt_pending_cmd *cmd = data;
6648 	struct hci_conn *conn = cmd->user_data;
6649 	struct mgmt_cp_get_conn_info *cp = cmd->param;
6650 	struct mgmt_rp_get_conn_info rp;
6651 	u8 status;
6652 
6653 	bt_dev_dbg(hdev, "err %d", err);
6654 
6655 	memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr));
6656 
6657 	status = mgmt_status(err);
6658 	if (status == MGMT_STATUS_SUCCESS) {
6659 		rp.rssi = conn->rssi;
6660 		rp.tx_power = conn->tx_power;
6661 		rp.max_tx_power = conn->max_tx_power;
6662 	} else {
6663 		rp.rssi = HCI_RSSI_INVALID;
6664 		rp.tx_power = HCI_TX_POWER_INVALID;
6665 		rp.max_tx_power = HCI_TX_POWER_INVALID;
6666 	}
6667 
6668 	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
6669 			  &rp, sizeof(rp));
6670 
6671 	if (conn) {
6672 		hci_conn_drop(conn);
6673 		hci_conn_put(conn);
6674 	}
6675 
6676 	mgmt_pending_free(cmd);
6677 }
6678 
/* Synchronous worker for MGMT_OP_GET_CONN_INFO (runs on the
 * hci_sync queue). Re-validates the connection, then refreshes the
 * cached RSSI and TX power values by querying the controller.
 * Returns 0 on success or a negative/HCI status error which is
 * translated in get_conn_info_complete().
 */
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16   handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* Connection vanished or was replaced: release the references
	 * taken in get_conn_info() and clear user_data so the
	 * completion callback does not drop them a second time.
	 */
	if (!conn || conn != cmd->user_data || conn->state != BT_CONNECTED) {
		if (cmd->user_data) {
			hci_conn_drop(cmd->user_data);
			hci_conn_put(cmd->user_data);
			cmd->user_data = NULL;
		}
		return MGMT_STATUS_NOT_CONNECTED;
	}

	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}
6721 
/* MGMT_OP_GET_CONN_INFO handler.
 *
 * Replies with cached RSSI/TX power values when they are recent
 * enough; otherwise queues get_conn_info_sync() to refresh them
 * from the controller, in which case the reply is sent from
 * get_conn_info_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* BR/EDR and LE connections live on different hash lists */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd)
			err = -ENOMEM;
		else
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		/* Keep the connection alive until the completion callback
		 * runs; released in get_conn_info_complete() (or earlier
		 * in get_conn_info_sync() if the link goes away).
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6815 
/* Completion callback for the MGMT_OP_GET_CLOCK_INFO sync request.
 *
 * Fills in the local clock and, when a connection was involved, the
 * piconet clock and accuracy, then replies and releases the
 * connection references taken in get_clock_info().
 */
static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct mgmt_rp_get_clock_info rp;
	struct hci_conn *conn = cmd->user_data;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* On failure, reply with zeroed clock values */
	if (err)
		goto complete;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
		/* Drop the hold/get references taken at queue time */
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

complete:
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
			  sizeof(rp));

	mgmt_pending_free(cmd);
}
6848 
/* Synchronous worker for MGMT_OP_GET_CLOCK_INFO (runs on the
 * hci_sync queue). Always reads the local clock; additionally reads
 * the piconet clock when the command referenced a still-connected
 * ACL link.
 */
static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	/* which = 0x00 (local clock), handle = 0 after the memset */
	memset(&hci_cp, 0, sizeof(hci_cp));
	err = hci_read_clock_sync(hdev, &hci_cp);

	if (conn) {
		/* Make sure connection still exists */
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);

		if (conn && conn == cmd->user_data &&
		    conn->state == BT_CONNECTED) {
			hci_cp.handle = cpu_to_le16(conn->handle);
			hci_cp.which = 0x01; /* Piconet clock */
			err = hci_read_clock_sync(hdev, &hci_cp);
		} else if (cmd->user_data) {
			/* Link went away: release the references now and
			 * clear user_data so the completion callback does
			 * not drop them again.
			 */
			hci_conn_drop(cmd->user_data);
			hci_conn_put(cmd->user_data);
			cmd->user_data = NULL;
		}
	}

	return err;
}
6879 
/* MGMT_OP_GET_CLOCK_INFO handler.
 *
 * Only valid for BR/EDR addresses. With BDADDR_ANY only the local
 * clock is read; otherwise the addressed ACL connection must exist
 * and its piconet clock is read as well. The actual reads happen in
 * get_clock_info_sync() and the reply is sent from
 * get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
								u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-ANY address must reference an existing connection */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);

	} else if (conn) {
		/* Keep the connection alive until the completion callback
		 * runs; released in get_clock_info_complete() (or earlier
		 * in get_clock_info_sync() if the link goes away).
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
	}


unlock:
	hci_dev_unlock(hdev);
	return err;
}
6947 
6948 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6949 {
6950 	struct hci_conn *conn;
6951 
6952 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6953 	if (!conn)
6954 		return false;
6955 
6956 	if (conn->dst_type != type)
6957 		return false;
6958 
6959 	if (conn->state != BT_CONNECTED)
6960 		return false;
6961 
6962 	return true;
6963 }
6964 
/* This function requires the caller holds hdev->lock.
 *
 * Create (or look up) the connection parameters for the given
 * identity address and move them onto the action list that matches
 * the requested auto-connect policy. Returns 0 on success or -EIO
 * if the params entry could not be allocated.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do if the policy is unchanged */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whatever action list the entry is currently on
	 * before re-adding it below according to the new policy.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* An in-progress explicit connect takes precedence over
		 * passive reporting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a pending connection if one isn't already up */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
7009 
7010 static void device_added(struct sock *sk, struct hci_dev *hdev,
7011 			 bdaddr_t *bdaddr, u8 type, u8 action)
7012 {
7013 	struct mgmt_ev_device_added ev;
7014 
7015 	bacpy(&ev.addr.bdaddr, bdaddr);
7016 	ev.addr.type = type;
7017 	ev.action = action;
7018 
7019 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7020 }
7021 
/* hci_sync work item for Add Device: just refresh passive scanning
 * so the new accept/report list entry takes effect.
 */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7026 
/* MGMT_OP_ADD_DEVICE handler.
 *
 * cp->action: 0x00 = background scan / report (LE only),
 * 0x01 = allow incoming connection, 0x02 = auto-connect (LE only).
 * BR/EDR devices go on the accept list; LE devices get conn_params
 * with the matching auto-connect policy.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		/* Re-evaluate page scan now that the accept list changed */
		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action onto the internal auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		/* Pick up whatever flags the (possibly pre-existing)
		 * params entry carries for the event below.
		 */
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			bitmap_to_arr32(&current_flags, params->flags,
					__HCI_CONN_NUM_FLAGS);
	}

	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	bitmap_to_arr32(&supported_flags, hdev->conn_flags,
			__HCI_CONN_NUM_FLAGS);
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7130 
7131 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7132 			   bdaddr_t *bdaddr, u8 type)
7133 {
7134 	struct mgmt_ev_device_removed ev;
7135 
7136 	bacpy(&ev.addr.bdaddr, bdaddr);
7137 	ev.addr.type = type;
7138 
7139 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7140 }
7141 
/* hci_sync work item for Remove Device: refresh passive scanning so
 * removed accept/report list entries stop being acted on.
 */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7146 
/* MGMT_OP_REMOVE_DEVICE handler.
 *
 * With a concrete address, removes that device from the accept list
 * (BR/EDR) or deletes its conn_params (LE). With BDADDR_ANY and
 * type 0, wipes the whole accept list and all non-disabled LE
 * connection parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			/* Re-evaluate page scan after shrinking the list */
			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries not created via Add Device (disabled or
		 * explicit-connect only) cannot be removed this way.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* BDADDR_ANY: wholesale removal */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* Wildcard removal requires address type 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep params for ongoing explicit connects, but
			 * demote them so they are cleaned up afterwards.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	/* Best effort: the passive scan update result is not checked */
	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
7274 
/* MGMT_OP_LOAD_CONN_PARAM handler.
 *
 * Clears all disabled connection parameter entries and then loads
 * the supplied list. Individual invalid entries are logged and
 * skipped rather than failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound that keeps struct_size() below within u16 range */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared count exactly */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		/* Range-check against the spec limits; bad entries skipped */
		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
7359 
7360 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7361 			       void *data, u16 len)
7362 {
7363 	struct mgmt_cp_set_external_config *cp = data;
7364 	bool changed;
7365 	int err;
7366 
7367 	bt_dev_dbg(hdev, "sock %p", sk);
7368 
7369 	if (hdev_is_powered(hdev))
7370 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7371 				       MGMT_STATUS_REJECTED);
7372 
7373 	if (cp->config != 0x00 && cp->config != 0x01)
7374 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7375 				         MGMT_STATUS_INVALID_PARAMS);
7376 
7377 	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7378 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7379 				       MGMT_STATUS_NOT_SUPPORTED);
7380 
7381 	hci_dev_lock(hdev);
7382 
7383 	if (cp->config)
7384 		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7385 	else
7386 		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7387 
7388 	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7389 	if (err < 0)
7390 		goto unlock;
7391 
7392 	if (!changed)
7393 		goto unlock;
7394 
7395 	err = new_options(hdev, sk);
7396 
7397 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7398 		mgmt_index_removed(hdev);
7399 
7400 		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7401 			hci_dev_set_flag(hdev, HCI_CONFIG);
7402 			hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7403 
7404 			queue_work(hdev->req_workqueue, &hdev->power_on);
7405 		} else {
7406 			set_bit(HCI_RAW, &hdev->flags);
7407 			mgmt_index_added(hdev);
7408 		}
7409 	}
7410 
7411 unlock:
7412 	hci_dev_unlock(hdev);
7413 	return err;
7414 }
7415 
/* MGMT_OP_SET_PUBLIC_ADDRESS handler.
 *
 * Stores the configuration-time public address for controllers that
 * provide a set_bdaddr driver callback. If this makes the device
 * fully configured, its index is switched from the unconfigured to
 * the configured list and it is powered on for setup. Rejected
 * while the controller is powered.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY is not a valid public address */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The driver must support setting the address */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* Having an address may complete the configuration: move the
	 * index to the configured list and power on for setup.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7467 
/* Completion handler for the HCI Read Local OOB (Extended) Data request
 * queued by read_local_ssp_oob_req().
 *
 * Converts the controller reply (legacy or Secure Connections variant)
 * into the mgmt response for MGMT_OP_READ_LOCAL_OOB_EXT_DATA, packing
 * the pairing hashes and randomizers as EIR fields, and on success
 * broadcasts MGMT_EV_LOCAL_OOB_DATA_UPDATED to other subscribed sockets.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	/* If the request itself succeeded, derive the status from the
	 * reply skb: missing, an error pointer, or its first status byte.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		/* NOTE(review): status already went through mgmt_status()
		 * above, so this second mapping looks redundant — confirm.
		 */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		/* Legacy (non Secure Connections) reply: only the P-192
		 * hash and randomizer are available.
		 */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* Class of Device field (5) + two 18-byte fields. */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended reply carrying both P-192 and P-256 values. */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				/* SC-only mode: suppress the P-192 values. */
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* An error reply carries no EIR payload at all. */
	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Subscribe the requester and inform other subscribed sockets. */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
7587 
7588 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7589 				  struct mgmt_cp_read_local_oob_ext_data *cp)
7590 {
7591 	struct mgmt_pending_cmd *cmd;
7592 	int err;
7593 
7594 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7595 			       cp, sizeof(*cp));
7596 	if (!cmd)
7597 		return -ENOMEM;
7598 
7599 	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
7600 				 read_local_oob_ext_data_complete);
7601 
7602 	if (err < 0) {
7603 		mgmt_pending_remove(cmd);
7604 		return err;
7605 	}
7606 
7607 	return 0;
7608 }
7609 
/* MGMT_OP_READ_LOCAL_OOB_EXT_DATA handler.
 *
 * Builds local out-of-band pairing data for either BR/EDR or LE
 * (cp->type is a bitmask of BDADDR_* address types). The BR/EDR case
 * with SSP enabled is completed asynchronously via
 * read_local_ssp_oob_req(); all other cases are answered directly.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* First pass: determine the status and the worst-case EIR length
	 * so that the reply buffer can be sized before it is filled.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: append the actual EIR fields; eir_len now tracks
	 * the running length of the appended data.
	 */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* SSP OOB data must come from the controller; the
			 * reply is sent from the async completion handler.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* addr[6] is a trailing type marker: 0x01 when the static
		 * address is used, 0x00 for the public address.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* Role is 0x02 when advertising, 0x01 otherwise —
		 * presumably the LE OOB role encoding; see the spec.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		/* Secure Connections confirm/random values, if generated. */
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* Opt this socket in to future OOB data update events. */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Inform other OOB-subscribed sockets about the new data. */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
7770 
7771 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7772 {
7773 	u32 flags = 0;
7774 
7775 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
7776 	flags |= MGMT_ADV_FLAG_DISCOV;
7777 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7778 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7779 	flags |= MGMT_ADV_FLAG_APPEARANCE;
7780 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7781 	flags |= MGMT_ADV_PARAM_DURATION;
7782 	flags |= MGMT_ADV_PARAM_TIMEOUT;
7783 	flags |= MGMT_ADV_PARAM_INTERVALS;
7784 	flags |= MGMT_ADV_PARAM_TX_POWER;
7785 	flags |= MGMT_ADV_PARAM_SCAN_RSP;
7786 
7787 	/* In extended adv TX_POWER returned from Set Adv Param
7788 	 * will be always valid.
7789 	 */
7790 	if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7791 	    ext_adv_capable(hdev))
7792 		flags |= MGMT_ADV_FLAG_TX_POWER;
7793 
7794 	if (ext_adv_capable(hdev)) {
7795 		flags |= MGMT_ADV_FLAG_SEC_1M;
7796 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7797 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
7798 
7799 		if (hdev->le_features[1] & HCI_LE_PHY_2M)
7800 			flags |= MGMT_ADV_FLAG_SEC_2M;
7801 
7802 		if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7803 			flags |= MGMT_ADV_FLAG_SEC_CODED;
7804 	}
7805 
7806 	return flags;
7807 }
7808 
7809 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
7810 			     void *data, u16 data_len)
7811 {
7812 	struct mgmt_rp_read_adv_features *rp;
7813 	size_t rp_len;
7814 	int err;
7815 	struct adv_info *adv_instance;
7816 	u32 supported_flags;
7817 	u8 *instance;
7818 
7819 	bt_dev_dbg(hdev, "sock %p", sk);
7820 
7821 	if (!lmp_le_capable(hdev))
7822 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7823 				       MGMT_STATUS_REJECTED);
7824 
7825 	hci_dev_lock(hdev);
7826 
7827 	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
7828 	rp = kmalloc(rp_len, GFP_ATOMIC);
7829 	if (!rp) {
7830 		hci_dev_unlock(hdev);
7831 		return -ENOMEM;
7832 	}
7833 
7834 	supported_flags = get_supported_adv_flags(hdev);
7835 
7836 	rp->supported_flags = cpu_to_le32(supported_flags);
7837 	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
7838 	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
7839 	rp->max_instances = hdev->le_num_of_adv_sets;
7840 	rp->num_instances = hdev->adv_instance_cnt;
7841 
7842 	instance = rp->instance;
7843 	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
7844 		*instance = adv_instance->instance;
7845 		instance++;
7846 	}
7847 
7848 	hci_dev_unlock(hdev);
7849 
7850 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7851 				MGMT_STATUS_SUCCESS, rp, rp_len);
7852 
7853 	kfree(rp);
7854 
7855 	return err;
7856 }
7857 
7858 static u8 calculate_name_len(struct hci_dev *hdev)
7859 {
7860 	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7861 
7862 	return eir_append_local_name(hdev, buf, 0);
7863 }
7864 
7865 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7866 			   bool is_adv_data)
7867 {
7868 	u8 max_len = HCI_MAX_AD_LENGTH;
7869 
7870 	if (is_adv_data) {
7871 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7872 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
7873 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
7874 			max_len -= 3;
7875 
7876 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7877 			max_len -= 3;
7878 	} else {
7879 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7880 			max_len -= calculate_name_len(hdev);
7881 
7882 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
7883 			max_len -= 4;
7884 	}
7885 
7886 	return max_len;
7887 }
7888 
7889 static bool flags_managed(u32 adv_flags)
7890 {
7891 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7892 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
7893 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
7894 }
7895 
7896 static bool tx_power_managed(u32 adv_flags)
7897 {
7898 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
7899 }
7900 
7901 static bool name_managed(u32 adv_flags)
7902 {
7903 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
7904 }
7905 
7906 static bool appearance_managed(u32 adv_flags)
7907 {
7908 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
7909 }
7910 
7911 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
7912 			      u8 len, bool is_adv_data)
7913 {
7914 	int i, cur_len;
7915 	u8 max_len;
7916 
7917 	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
7918 
7919 	if (len > max_len)
7920 		return false;
7921 
7922 	/* Make sure that the data is correctly formatted. */
7923 	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
7924 		cur_len = data[i];
7925 
7926 		if (!cur_len)
7927 			continue;
7928 
7929 		if (data[i + 1] == EIR_FLAGS &&
7930 		    (!is_adv_data || flags_managed(adv_flags)))
7931 			return false;
7932 
7933 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
7934 			return false;
7935 
7936 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
7937 			return false;
7938 
7939 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
7940 			return false;
7941 
7942 		if (data[i + 1] == EIR_APPEARANCE &&
7943 		    appearance_managed(adv_flags))
7944 			return false;
7945 
7946 		/* If the current field length would exceed the total data
7947 		 * length, then it's invalid.
7948 		 */
7949 		if (i + cur_len >= len)
7950 			return false;
7951 	}
7952 
7953 	return true;
7954 }
7955 
7956 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
7957 {
7958 	u32 supported_flags, phy_flags;
7959 
7960 	/* The current implementation only supports a subset of the specified
7961 	 * flags. Also need to check mutual exclusiveness of sec flags.
7962 	 */
7963 	supported_flags = get_supported_adv_flags(hdev);
7964 	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
7965 	if (adv_flags & ~supported_flags ||
7966 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
7967 		return false;
7968 
7969 	return true;
7970 }
7971 
7972 static bool adv_busy(struct hci_dev *hdev)
7973 {
7974 	return (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
7975 		pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
7976 		pending_find(MGMT_OP_SET_LE, hdev) ||
7977 		pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev) ||
7978 		pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev));
7979 }
7980 
7981 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
7982 			     int err)
7983 {
7984 	struct adv_info *adv, *n;
7985 
7986 	bt_dev_dbg(hdev, "err %d", err);
7987 
7988 	hci_dev_lock(hdev);
7989 
7990 	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
7991 		u8 instance;
7992 
7993 		if (!adv->pending)
7994 			continue;
7995 
7996 		if (!err) {
7997 			adv->pending = false;
7998 			continue;
7999 		}
8000 
8001 		instance = adv->instance;
8002 
8003 		if (hdev->cur_adv_instance == instance)
8004 			cancel_adv_timeout(hdev);
8005 
8006 		hci_remove_adv_instance(hdev, instance);
8007 		mgmt_advertising_removed(sk, hdev, instance);
8008 	}
8009 
8010 	hci_dev_unlock(hdev);
8011 }
8012 
8013 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8014 {
8015 	struct mgmt_pending_cmd *cmd = data;
8016 	struct mgmt_cp_add_advertising *cp = cmd->param;
8017 	struct mgmt_rp_add_advertising rp;
8018 
8019 	memset(&rp, 0, sizeof(rp));
8020 
8021 	rp.instance = cp->instance;
8022 
8023 	if (err)
8024 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8025 				mgmt_status(err));
8026 	else
8027 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8028 				  mgmt_status(err), &rp, sizeof(rp));
8029 
8030 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8031 
8032 	mgmt_pending_free(cmd);
8033 }
8034 
8035 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8036 {
8037 	struct mgmt_pending_cmd *cmd = data;
8038 	struct mgmt_cp_add_advertising *cp = cmd->param;
8039 
8040 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8041 }
8042 
/* MGMT_OP_ADD_ADVERTISING handler.
 *
 * Registers (or replaces) an advertising instance carrying both the
 * advertising data and the scan response data, then schedules it for
 * transmission if the adapter state allows.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Instance identifiers are 1-based and bounded by the number of
	 * advertising sets the controller exposes.
	 */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The command must carry exactly adv_data_len + scan_rsp_len
	 * trailing bytes.
	 */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout can only be enforced while the adapter is powered. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Both payloads must be well-formed TLV sequences that do not
	 * clash with kernel-managed fields.
	 */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8175 
8176 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8177 					int err)
8178 {
8179 	struct mgmt_pending_cmd *cmd = data;
8180 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8181 	struct mgmt_rp_add_ext_adv_params rp;
8182 	struct adv_info *adv;
8183 	u32 flags;
8184 
8185 	BT_DBG("%s", hdev->name);
8186 
8187 	hci_dev_lock(hdev);
8188 
8189 	adv = hci_find_adv_instance(hdev, cp->instance);
8190 	if (!adv)
8191 		goto unlock;
8192 
8193 	rp.instance = cp->instance;
8194 	rp.tx_power = adv->tx_power;
8195 
8196 	/* While we're at it, inform userspace of the available space for this
8197 	 * advertisement, given the flags that will be used.
8198 	 */
8199 	flags = __le32_to_cpu(cp->flags);
8200 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8201 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8202 
8203 	if (err) {
8204 		/* If this advertisement was previously advertising and we
8205 		 * failed to update it, we signal that it has been removed and
8206 		 * delete its structure
8207 		 */
8208 		if (!adv->pending)
8209 			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8210 
8211 		hci_remove_adv_instance(hdev, cp->instance);
8212 
8213 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8214 				mgmt_status(err));
8215 	} else {
8216 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8217 				  mgmt_status(err), &rp, sizeof(rp));
8218 	}
8219 
8220 unlock:
8221 	if (cmd)
8222 		mgmt_pending_free(cmd);
8223 
8224 	hci_dev_unlock(hdev);
8225 }
8226 
8227 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8228 {
8229 	struct mgmt_pending_cmd *cmd = data;
8230 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8231 
8232 	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8233 }
8234 
8235 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8236 			      void *data, u16 data_len)
8237 {
8238 	struct mgmt_cp_add_ext_adv_params *cp = data;
8239 	struct mgmt_rp_add_ext_adv_params rp;
8240 	struct mgmt_pending_cmd *cmd = NULL;
8241 	u32 flags, min_interval, max_interval;
8242 	u16 timeout, duration;
8243 	u8 status;
8244 	s8 tx_power;
8245 	int err;
8246 
8247 	BT_DBG("%s", hdev->name);
8248 
8249 	status = mgmt_le_support(hdev);
8250 	if (status)
8251 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8252 				       status);
8253 
8254 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8255 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8256 				       MGMT_STATUS_INVALID_PARAMS);
8257 
8258 	/* The purpose of breaking add_advertising into two separate MGMT calls
8259 	 * for params and data is to allow more parameters to be added to this
8260 	 * structure in the future. For this reason, we verify that we have the
8261 	 * bare minimum structure we know of when the interface was defined. Any
8262 	 * extra parameters we don't know about will be ignored in this request.
8263 	 */
8264 	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8265 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8266 				       MGMT_STATUS_INVALID_PARAMS);
8267 
8268 	flags = __le32_to_cpu(cp->flags);
8269 
8270 	if (!requested_adv_flags_are_valid(hdev, flags))
8271 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8272 				       MGMT_STATUS_INVALID_PARAMS);
8273 
8274 	hci_dev_lock(hdev);
8275 
8276 	/* In new interface, we require that we are powered to register */
8277 	if (!hdev_is_powered(hdev)) {
8278 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8279 				      MGMT_STATUS_REJECTED);
8280 		goto unlock;
8281 	}
8282 
8283 	if (adv_busy(hdev)) {
8284 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8285 				      MGMT_STATUS_BUSY);
8286 		goto unlock;
8287 	}
8288 
8289 	/* Parse defined parameters from request, use defaults otherwise */
8290 	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8291 		  __le16_to_cpu(cp->timeout) : 0;
8292 
8293 	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8294 		   __le16_to_cpu(cp->duration) :
8295 		   hdev->def_multi_adv_rotation_duration;
8296 
8297 	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8298 		       __le32_to_cpu(cp->min_interval) :
8299 		       hdev->le_adv_min_interval;
8300 
8301 	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8302 		       __le32_to_cpu(cp->max_interval) :
8303 		       hdev->le_adv_max_interval;
8304 
8305 	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8306 		   cp->tx_power :
8307 		   HCI_ADV_TX_POWER_NO_PREFERENCE;
8308 
8309 	/* Create advertising instance with no advertising or response data */
8310 	err = hci_add_adv_instance(hdev, cp->instance, flags,
8311 				   0, NULL, 0, NULL, timeout, duration,
8312 				   tx_power, min_interval, max_interval);
8313 
8314 	if (err < 0) {
8315 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8316 				      MGMT_STATUS_FAILED);
8317 		goto unlock;
8318 	}
8319 
8320 	/* Submit request for advertising params if ext adv available */
8321 	if (ext_adv_capable(hdev)) {
8322 		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
8323 				       data, data_len);
8324 		if (!cmd) {
8325 			err = -ENOMEM;
8326 			hci_remove_adv_instance(hdev, cp->instance);
8327 			goto unlock;
8328 		}
8329 
8330 		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8331 					 add_ext_adv_params_complete);
8332 		if (err < 0)
8333 			mgmt_pending_free(cmd);
8334 	} else {
8335 		rp.instance = cp->instance;
8336 		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8337 		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8338 		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8339 		err = mgmt_cmd_complete(sk, hdev->id,
8340 					MGMT_OP_ADD_EXT_ADV_PARAMS,
8341 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8342 	}
8343 
8344 unlock:
8345 	hci_dev_unlock(hdev);
8346 
8347 	return err;
8348 }
8349 
8350 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8351 {
8352 	struct mgmt_pending_cmd *cmd = data;
8353 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8354 	struct mgmt_rp_add_advertising rp;
8355 
8356 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8357 
8358 	memset(&rp, 0, sizeof(rp));
8359 
8360 	rp.instance = cp->instance;
8361 
8362 	if (err)
8363 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8364 				mgmt_status(err));
8365 	else
8366 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8367 				  mgmt_status(err), &rp, sizeof(rp));
8368 
8369 	mgmt_pending_free(cmd);
8370 }
8371 
8372 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8373 {
8374 	struct mgmt_pending_cmd *cmd = data;
8375 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8376 	int err;
8377 
8378 	if (ext_adv_capable(hdev)) {
8379 		err = hci_update_adv_data_sync(hdev, cp->instance);
8380 		if (err)
8381 			return err;
8382 
8383 		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8384 		if (err)
8385 			return err;
8386 
8387 		return hci_enable_ext_advertising_sync(hdev, cp->instance);
8388 	}
8389 
8390 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8391 }
8392 
/* MGMT_OP_ADD_EXT_ADV_DATA handler.
 *
 * Second half of the two-stage extended Add Advertising flow: attaches
 * advertising and scan response data to an instance previously created
 * by MGMT_OP_ADD_EXT_ADV_PARAMS, and schedules it for transmission.
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance must already exist (created by the params call). */
	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8511 
8512 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
8513 					int err)
8514 {
8515 	struct mgmt_pending_cmd *cmd = data;
8516 	struct mgmt_cp_remove_advertising *cp = cmd->param;
8517 	struct mgmt_rp_remove_advertising rp;
8518 
8519 	bt_dev_dbg(hdev, "err %d", err);
8520 
8521 	memset(&rp, 0, sizeof(rp));
8522 	rp.instance = cp->instance;
8523 
8524 	if (err)
8525 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8526 				mgmt_status(err));
8527 	else
8528 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8529 				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8530 
8531 	mgmt_pending_free(cmd);
8532 }
8533 
8534 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
8535 {
8536 	struct mgmt_pending_cmd *cmd = data;
8537 	struct mgmt_cp_remove_advertising *cp = cmd->param;
8538 	int err;
8539 
8540 	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
8541 	if (err)
8542 		return err;
8543 
8544 	if (list_empty(&hdev->adv_instances))
8545 		err = hci_disable_advertising_sync(hdev);
8546 
8547 	return err;
8548 }
8549 
/* Handle MGMT_OP_REMOVE_ADVERTISING.
 *
 * Validates the requested instance, rejects the call while conflicting
 * commands are pending, and queues remove_advertising_sync() on the
 * cmd_sync machinery. The reply is sent asynchronously from
 * remove_advertising_complete().
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* A non-zero instance must exist; instance 0 skips this check and
	 * is handed straight to hci_remove_advertising_sync() (presumably
	 * meaning "all instances" - confirm against that helper).
	 */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Refuse while another command that touches advertising state is
	 * still in flight.
	 */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to remove if no instances are configured at all */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8599 
8600 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
8601 			     void *data, u16 data_len)
8602 {
8603 	struct mgmt_cp_get_adv_size_info *cp = data;
8604 	struct mgmt_rp_get_adv_size_info rp;
8605 	u32 flags, supported_flags;
8606 	int err;
8607 
8608 	bt_dev_dbg(hdev, "sock %p", sk);
8609 
8610 	if (!lmp_le_capable(hdev))
8611 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8612 				       MGMT_STATUS_REJECTED);
8613 
8614 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8615 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8616 				       MGMT_STATUS_INVALID_PARAMS);
8617 
8618 	flags = __le32_to_cpu(cp->flags);
8619 
8620 	/* The current implementation only supports a subset of the specified
8621 	 * flags.
8622 	 */
8623 	supported_flags = get_supported_adv_flags(hdev);
8624 	if (flags & ~supported_flags)
8625 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8626 				       MGMT_STATUS_INVALID_PARAMS);
8627 
8628 	rp.instance = cp->instance;
8629 	rp.flags = cp->flags;
8630 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8631 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8632 
8633 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8634 				MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8635 
8636 	return err;
8637 }
8638 
/* Dispatch table for MGMT commands. Entries are indexed by command
 * opcode, so their order must match the MGMT_OP_* numbering exactly -
 * do not reorder. Each entry gives the handler, the expected (minimum,
 * when HCI_MGMT_VAR_LEN is set) parameter size, and optional HCI_MGMT_*
 * flags controlling hdev requirements and access restrictions.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
};
8766 
8767 void mgmt_index_added(struct hci_dev *hdev)
8768 {
8769 	struct mgmt_ev_ext_index ev;
8770 
8771 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8772 		return;
8773 
8774 	switch (hdev->dev_type) {
8775 	case HCI_PRIMARY:
8776 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8777 			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
8778 					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8779 			ev.type = 0x01;
8780 		} else {
8781 			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
8782 					 HCI_MGMT_INDEX_EVENTS);
8783 			ev.type = 0x00;
8784 		}
8785 		break;
8786 	case HCI_AMP:
8787 		ev.type = 0x02;
8788 		break;
8789 	default:
8790 		return;
8791 	}
8792 
8793 	ev.bus = hdev->bus;
8794 
8795 	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
8796 			 HCI_MGMT_EXT_INDEX_EVENTS);
8797 }
8798 
8799 void mgmt_index_removed(struct hci_dev *hdev)
8800 {
8801 	struct mgmt_ev_ext_index ev;
8802 	u8 status = MGMT_STATUS_INVALID_INDEX;
8803 
8804 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8805 		return;
8806 
8807 	switch (hdev->dev_type) {
8808 	case HCI_PRIMARY:
8809 		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8810 
8811 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8812 			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
8813 					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8814 			ev.type = 0x01;
8815 		} else {
8816 			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
8817 					 HCI_MGMT_INDEX_EVENTS);
8818 			ev.type = 0x00;
8819 		}
8820 		break;
8821 	case HCI_AMP:
8822 		ev.type = 0x02;
8823 		break;
8824 	default:
8825 		return;
8826 	}
8827 
8828 	ev.bus = hdev->bus;
8829 
8830 	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
8831 			 HCI_MGMT_EXT_INDEX_EVENTS);
8832 }
8833 
8834 void mgmt_power_on(struct hci_dev *hdev, int err)
8835 {
8836 	struct cmd_lookup match = { NULL, hdev };
8837 
8838 	bt_dev_dbg(hdev, "err %d", err);
8839 
8840 	hci_dev_lock(hdev);
8841 
8842 	if (!err) {
8843 		restart_le_actions(hdev);
8844 		hci_update_passive_scan(hdev);
8845 	}
8846 
8847 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8848 
8849 	new_settings(hdev, match.sk);
8850 
8851 	if (match.sk)
8852 		sock_put(match.sk);
8853 
8854 	hci_dev_unlock(hdev);
8855 }
8856 
/* Controller is going down: complete pending commands, reset the
 * advertised class of device and broadcast the new settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	/* Opcode 0 is used as a wildcard here for all remaining pending
	 * commands (see mgmt_pending_foreach() in mgmt_util.c).
	 */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Only announce a class-of-device change if it was non-zero */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
8890 
8891 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8892 {
8893 	struct mgmt_pending_cmd *cmd;
8894 	u8 status;
8895 
8896 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8897 	if (!cmd)
8898 		return;
8899 
8900 	if (err == -ERFKILL)
8901 		status = MGMT_STATUS_RFKILLED;
8902 	else
8903 		status = MGMT_STATUS_FAILED;
8904 
8905 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8906 
8907 	mgmt_pending_remove(cmd);
8908 }
8909 
8910 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
8911 		       bool persistent)
8912 {
8913 	struct mgmt_ev_new_link_key ev;
8914 
8915 	memset(&ev, 0, sizeof(ev));
8916 
8917 	ev.store_hint = persistent;
8918 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8919 	ev.key.addr.type = BDADDR_BREDR;
8920 	ev.key.type = key->type;
8921 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
8922 	ev.key.pin_len = key->pin_len;
8923 
8924 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
8925 }
8926 
8927 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8928 {
8929 	switch (ltk->type) {
8930 	case SMP_LTK:
8931 	case SMP_LTK_RESPONDER:
8932 		if (ltk->authenticated)
8933 			return MGMT_LTK_AUTHENTICATED;
8934 		return MGMT_LTK_UNAUTHENTICATED;
8935 	case SMP_LTK_P256:
8936 		if (ltk->authenticated)
8937 			return MGMT_LTK_P256_AUTH;
8938 		return MGMT_LTK_P256_UNAUTH;
8939 	case SMP_LTK_P256_DEBUG:
8940 		return MGMT_LTK_P256_DEBUG;
8941 	}
8942 
8943 	return MGMT_LTK_UNAUTHENTICATED;
8944 }
8945 
8946 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
8947 {
8948 	struct mgmt_ev_new_long_term_key ev;
8949 
8950 	memset(&ev, 0, sizeof(ev));
8951 
8952 	/* Devices using resolvable or non-resolvable random addresses
8953 	 * without providing an identity resolving key don't require
8954 	 * to store long term keys. Their addresses will change the
8955 	 * next time around.
8956 	 *
8957 	 * Only when a remote device provides an identity address
8958 	 * make sure the long term key is stored. If the remote
8959 	 * identity is known, the long term keys are internally
8960 	 * mapped to the identity address. So allow static random
8961 	 * and public addresses here.
8962 	 */
8963 	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
8964 	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
8965 		ev.store_hint = 0x00;
8966 	else
8967 		ev.store_hint = persistent;
8968 
8969 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8970 	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
8971 	ev.key.type = mgmt_ltk_type(key);
8972 	ev.key.enc_size = key->enc_size;
8973 	ev.key.ediv = key->ediv;
8974 	ev.key.rand = key->rand;
8975 
8976 	if (key->type == SMP_LTK)
8977 		ev.key.initiator = 1;
8978 
8979 	/* Make sure we copy only the significant bytes based on the
8980 	 * encryption key size, and set the rest of the value to zeroes.
8981 	 */
8982 	memcpy(ev.key.val, key->val, key->enc_size);
8983 	memset(ev.key.val + key->enc_size, 0,
8984 	       sizeof(ev.key.val) - key->enc_size);
8985 
8986 	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
8987 }
8988 
8989 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
8990 {
8991 	struct mgmt_ev_new_irk ev;
8992 
8993 	memset(&ev, 0, sizeof(ev));
8994 
8995 	ev.store_hint = persistent;
8996 
8997 	bacpy(&ev.rpa, &irk->rpa);
8998 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
8999 	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
9000 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9001 
9002 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9003 }
9004 
9005 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9006 		   bool persistent)
9007 {
9008 	struct mgmt_ev_new_csrk ev;
9009 
9010 	memset(&ev, 0, sizeof(ev));
9011 
9012 	/* Devices using resolvable or non-resolvable random addresses
9013 	 * without providing an identity resolving key don't require
9014 	 * to store signature resolving keys. Their addresses will change
9015 	 * the next time around.
9016 	 *
9017 	 * Only when a remote device provides an identity address
9018 	 * make sure the signature resolving key is stored. So allow
9019 	 * static random and public addresses here.
9020 	 */
9021 	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9022 	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9023 		ev.store_hint = 0x00;
9024 	else
9025 		ev.store_hint = persistent;
9026 
9027 	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9028 	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
9029 	ev.key.type = csrk->type;
9030 	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9031 
9032 	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
9033 }
9034 
9035 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9036 			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9037 			 u16 max_interval, u16 latency, u16 timeout)
9038 {
9039 	struct mgmt_ev_new_conn_param ev;
9040 
9041 	if (!hci_is_identity_address(bdaddr, bdaddr_type))
9042 		return;
9043 
9044 	memset(&ev, 0, sizeof(ev));
9045 	bacpy(&ev.addr.bdaddr, bdaddr);
9046 	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9047 	ev.store_hint = store_hint;
9048 	ev.min_interval = cpu_to_le16(min_interval);
9049 	ev.max_interval = cpu_to_le16(max_interval);
9050 	ev.latency = cpu_to_le16(latency);
9051 	ev.timeout = cpu_to_le16(timeout);
9052 
9053 	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9054 }
9055 
9056 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9057 			   u8 *name, u8 name_len)
9058 {
9059 	struct sk_buff *skb;
9060 	struct mgmt_ev_device_connected *ev;
9061 	u16 eir_len = 0;
9062 	u32 flags = 0;
9063 
9064 	if (conn->le_adv_data_len > 0)
9065 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9066 				     conn->le_adv_data_len);
9067 	else
9068 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9069 				     2 + name_len + 5);
9070 
9071 	ev = skb_put(skb, sizeof(*ev));
9072 	bacpy(&ev->addr.bdaddr, &conn->dst);
9073 	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9074 
9075 	if (conn->out)
9076 		flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9077 
9078 	ev->flags = __cpu_to_le32(flags);
9079 
9080 	/* We must ensure that the EIR Data fields are ordered and
9081 	 * unique. Keep it simple for now and avoid the problem by not
9082 	 * adding any BR/EDR data to the LE adv.
9083 	 */
9084 	if (conn->le_adv_data_len > 0) {
9085 		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9086 		eir_len = conn->le_adv_data_len;
9087 	} else {
9088 		if (name_len > 0) {
9089 			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
9090 						  name, name_len);
9091 			skb_put(skb, eir_len);
9092 		}
9093 
9094 		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0) {
9095 			eir_len = eir_append_data(ev->eir, eir_len,
9096 						  EIR_CLASS_OF_DEV,
9097 						  conn->dev_class, 3);
9098 			skb_put(skb, 5);
9099 		}
9100 	}
9101 
9102 	ev->eir_len = cpu_to_le16(eir_len);
9103 
9104 	mgmt_event_skb(skb, NULL);
9105 }
9106 
9107 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9108 {
9109 	struct sock **sk = data;
9110 
9111 	cmd->cmd_complete(cmd, 0);
9112 
9113 	*sk = cmd->sk;
9114 	sock_hold(*sk);
9115 
9116 	mgmt_pending_remove(cmd);
9117 }
9118 
/* mgmt_pending_foreach() callback: emit the Device Unpaired event and
 * complete a pending UNPAIR_DEVICE command with status 0.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
9129 
9130 bool mgmt_powering_down(struct hci_dev *hdev)
9131 {
9132 	struct mgmt_pending_cmd *cmd;
9133 	struct mgmt_mode *cp;
9134 
9135 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9136 	if (!cmd)
9137 		return false;
9138 
9139 	cp = cmd->param;
9140 	if (!cp->val)
9141 		return true;
9142 
9143 	return false;
9144 }
9145 
/* Forward an HCI disconnection to the management interface: complete
 * pending DISCONNECT commands and broadcast the Device Disconnected
 * event (skipping the socket whose DISCONNECT was just answered).
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only connections that were announced to userspace get a
	 * disconnection event.
	 */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp() stores (with a held reference) the socket of a
	 * pending DISCONNECT command so it can be excluded below.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
9185 
9186 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9187 			    u8 link_type, u8 addr_type, u8 status)
9188 {
9189 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9190 	struct mgmt_cp_disconnect *cp;
9191 	struct mgmt_pending_cmd *cmd;
9192 
9193 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9194 			     hdev);
9195 
9196 	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9197 	if (!cmd)
9198 		return;
9199 
9200 	cp = cmd->param;
9201 
9202 	if (bacmp(bdaddr, &cp->addr.bdaddr))
9203 		return;
9204 
9205 	if (cp->addr.type != bdaddr_type)
9206 		return;
9207 
9208 	cmd->cmd_complete(cmd, mgmt_status(status));
9209 	mgmt_pending_remove(cmd);
9210 }
9211 
9212 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9213 			 u8 addr_type, u8 status)
9214 {
9215 	struct mgmt_ev_connect_failed ev;
9216 
9217 	/* The connection is still in hci_conn_hash so test for 1
9218 	 * instead of 0 to know if this is the last one.
9219 	 */
9220 	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9221 		cancel_delayed_work(&hdev->power_off);
9222 		queue_work(hdev->req_workqueue, &hdev->power_off.work);
9223 	}
9224 
9225 	bacpy(&ev.addr.bdaddr, bdaddr);
9226 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9227 	ev.status = mgmt_status(status);
9228 
9229 	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
9230 }
9231 
9232 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9233 {
9234 	struct mgmt_ev_pin_code_request ev;
9235 
9236 	bacpy(&ev.addr.bdaddr, bdaddr);
9237 	ev.addr.type = BDADDR_BREDR;
9238 	ev.secure = secure;
9239 
9240 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9241 }
9242 
9243 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9244 				  u8 status)
9245 {
9246 	struct mgmt_pending_cmd *cmd;
9247 
9248 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9249 	if (!cmd)
9250 		return;
9251 
9252 	cmd->cmd_complete(cmd, mgmt_status(status));
9253 	mgmt_pending_remove(cmd);
9254 }
9255 
9256 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9257 				      u8 status)
9258 {
9259 	struct mgmt_pending_cmd *cmd;
9260 
9261 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9262 	if (!cmd)
9263 		return;
9264 
9265 	cmd->cmd_complete(cmd, mgmt_status(status));
9266 	mgmt_pending_remove(cmd);
9267 }
9268 
9269 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9270 			      u8 link_type, u8 addr_type, u32 value,
9271 			      u8 confirm_hint)
9272 {
9273 	struct mgmt_ev_user_confirm_request ev;
9274 
9275 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9276 
9277 	bacpy(&ev.addr.bdaddr, bdaddr);
9278 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9279 	ev.confirm_hint = confirm_hint;
9280 	ev.value = cpu_to_le32(value);
9281 
9282 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9283 			  NULL);
9284 }
9285 
9286 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9287 			      u8 link_type, u8 addr_type)
9288 {
9289 	struct mgmt_ev_user_passkey_request ev;
9290 
9291 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9292 
9293 	bacpy(&ev.addr.bdaddr, bdaddr);
9294 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9295 
9296 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9297 			  NULL);
9298 }
9299 
9300 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9301 				      u8 link_type, u8 addr_type, u8 status,
9302 				      u8 opcode)
9303 {
9304 	struct mgmt_pending_cmd *cmd;
9305 
9306 	cmd = pending_find(opcode, hdev);
9307 	if (!cmd)
9308 		return -ENOENT;
9309 
9310 	cmd->cmd_complete(cmd, mgmt_status(status));
9311 	mgmt_pending_remove(cmd);
9312 
9313 	return 0;
9314 }
9315 
/* Complete a pending User Confirm (positive) reply command */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
9322 
/* Complete a pending User Confirm (negative) reply command */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
9330 
/* Complete a pending User Passkey (positive) reply command */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
9337 
/* Complete a pending User Passkey (negative) reply command */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
9345 
9346 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9347 			     u8 link_type, u8 addr_type, u32 passkey,
9348 			     u8 entered)
9349 {
9350 	struct mgmt_ev_passkey_notify ev;
9351 
9352 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9353 
9354 	bacpy(&ev.addr.bdaddr, bdaddr);
9355 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9356 	ev.passkey = __cpu_to_le32(passkey);
9357 	ev.entered = entered;
9358 
9359 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9360 }
9361 
/* Report a failed authentication: broadcast the Auth Failed event and
 * complete any pending pairing command for this connection.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* The pairing initiator's socket is passed as the skip socket;
	 * it gets the command response below instead of the broadcast.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
9382 
/* Handle the result of a link-security (auth enable) change: complete
 * pending SET_LINK_SECURITY commands and sync the mgmt LINK_SECURITY
 * flag with the controller's HCI_AUTH state.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's auth state into the mgmt flag; changed
	 * is true only when the flag actually flipped.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	/* Only broadcast settings when something actually changed */
	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9409 
9410 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9411 {
9412 	struct cmd_lookup *match = data;
9413 
9414 	if (match->sk == NULL) {
9415 		match->sk = cmd->sk;
9416 		sock_hold(match->sk);
9417 	}
9418 }
9419 
/* Handle completion of a class-of-device update: find the socket that
 * requested it and broadcast the new class on success.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Any of these three commands may have triggered the update */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
9438 
/* Handle completion of a local-name update and emit the Local Name
 * Changed event unless the update was part of powering on.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No pending command - presumably a kernel-initiated name
		 * change (TODO confirm against callers); remember it on
		 * the hdev.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
9466 
9467 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9468 {
9469 	int i;
9470 
9471 	for (i = 0; i < uuid_count; i++) {
9472 		if (!memcmp(uuid, uuids[i], 16))
9473 			return true;
9474 	}
9475 
9476 	return false;
9477 }
9478 
/* Check whether any service UUID advertised in the EIR/advertising data
 * matches an entry in the @uuids filter list.  16- and 32-bit UUIDs are
 * expanded to full 128-bit form on top of the Bluetooth base UUID before
 * comparison.  Unknown field types are skipped; parsing stops at a
 * zero-length or truncated field.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];	/* length of type byte + data */
		u8 uuid[16];
		int i;

		/* Zero length terminates the EIR data. */
		if (field_len == 0)
			break;

		/* Don't read past the buffer on a truncated field. */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* Entries start at eir[2], 2 bytes each, LE order;
			 * bytes 12/13 of the base UUID hold the short form.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* Entries start at eir[2], 4 bytes each, LE order. */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* Full 128-bit UUIDs are compared directly. */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (type + data + length byte). */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
9533 
9534 static void restart_le_scan(struct hci_dev *hdev)
9535 {
9536 	/* If controller is not scanning we are done. */
9537 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
9538 		return;
9539 
9540 	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
9541 		       hdev->discovery.scan_start +
9542 		       hdev->discovery.scan_duration))
9543 		return;
9544 
9545 	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
9546 			   DISCOV_LE_RESTART_DELAY);
9547 }
9548 
9549 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
9550 			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9551 {
9552 	/* If a RSSI threshold has been specified, and
9553 	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
9554 	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
9555 	 * is set, let it through for further processing, as we might need to
9556 	 * restart the scan.
9557 	 *
9558 	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
9559 	 * the results are also dropped.
9560 	 */
9561 	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9562 	    (rssi == HCI_RSSI_INVALID ||
9563 	    (rssi < hdev->discovery.rssi &&
9564 	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
9565 		return  false;
9566 
9567 	if (hdev->discovery.uuid_count != 0) {
9568 		/* If a list of UUIDs is provided in filter, results with no
9569 		 * matching UUID should be dropped.
9570 		 */
9571 		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
9572 				   hdev->discovery.uuids) &&
9573 		    !eir_has_uuids(scan_rsp, scan_rsp_len,
9574 				   hdev->discovery.uuid_count,
9575 				   hdev->discovery.uuids))
9576 			return false;
9577 	}
9578 
9579 	/* If duplicate filtering does not report RSSI changes, then restart
9580 	 * scanning to ensure updated result with updated RSSI values.
9581 	 */
9582 	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
9583 		restart_le_scan(hdev);
9584 
9585 		/* Validate RSSI value against the RSSI threshold once more. */
9586 		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9587 		    rssi < hdev->discovery.rssi)
9588 			return false;
9589 	}
9590 
9591 	return true;
9592 }
9593 
9594 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
9595 				  bdaddr_t *bdaddr, u8 addr_type)
9596 {
9597 	struct mgmt_ev_adv_monitor_device_lost ev;
9598 
9599 	ev.monitor_handle = cpu_to_le16(handle);
9600 	bacpy(&ev.addr.bdaddr, bdaddr);
9601 	ev.addr.type = addr_type;
9602 
9603 	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
9604 		   NULL);
9605 }
9606 
/* Dispatch a device-found report to user space, either as a plain
 * DEVICE_FOUND event, as one or two ADV_MONITOR_DEVICE_FOUND events, or
 * both, depending on whether kernel discovery is active and whether an
 * Advertisement Monitor matched.  Consumes @skb on all paths.
 */
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notify = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		/* Fast path: no monitor notification pending. */
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	/* The monitor event is the DEVICE_FOUND payload prefixed with a
	 * monitor handle; size accordingly.
	 */
	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb) {
		/* Allocation failed: still deliver (or drop) the plain
		 * DEVICE_FOUND event; skb must be consumed either way.
		 */
		if (report_device)
			mgmt_event_skb(skb, skip_sk);
		else
			kfree_skb(skb);
		return;
	}

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	skb_put_data(advmon_skb, skb->data, skb->len);

	hdev->advmon_pend_notify = false;

	/* Find this device in the monitored list; notify once per device
	 * and recompute whether any device still awaits notification.
	 */
	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				*monitor_handle = cpu_to_le16(dev->handle);
				notify = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notify) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		*monitor_handle = 0;
		notify = true;
	}

	/* Deliver or drop the original DEVICE_FOUND skb... */
	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);

	/* ...and likewise the monitor copy. */
	if (notify)
		mgmt_event_skb(advmon_skb, skip_sk);
	else
		kfree_skb(advmon_skb);
}
9696 
/* Report a discovered device (inquiry result or advertising report) to
 * user space, applying the active discovery filters first.  The built
 * DEVICE_FOUND skb is handed to mgmt_adv_monitor_device_found() which
 * decides how to deliver it.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	/* Synthesize a Class of Device EIR field when BR/EDR gave us a
	 * dev_class but the EIR data doesn't already carry one.
	 */
	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		/* eir_append_data() returns the updated total length. */
		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	/* Ownership of skb passes to the monitor dispatch helper. */
	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}
9782 
9783 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9784 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
9785 {
9786 	struct sk_buff *skb;
9787 	struct mgmt_ev_device_found *ev;
9788 	u16 eir_len;
9789 	u32 flags;
9790 
9791 	if (name_len)
9792 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND, 2 + name_len);
9793 	else
9794 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND, 0);
9795 
9796 	ev = skb_put(skb, sizeof(*ev));
9797 	bacpy(&ev->addr.bdaddr, bdaddr);
9798 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
9799 	ev->rssi = rssi;
9800 
9801 	if (name) {
9802 		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
9803 					  name_len);
9804 		flags = 0;
9805 		skb_put(skb, eir_len);
9806 	} else {
9807 		eir_len = 0;
9808 		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
9809 	}
9810 
9811 	ev->eir_len = cpu_to_le16(eir_len);
9812 	ev->flags = cpu_to_le32(flags);
9813 
9814 	mgmt_event_skb(skb, NULL);
9815 }
9816 
9817 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
9818 {
9819 	struct mgmt_ev_discovering ev;
9820 
9821 	bt_dev_dbg(hdev, "discovering %u", discovering);
9822 
9823 	memset(&ev, 0, sizeof(ev));
9824 	ev.type = hdev->discovery.type;
9825 	ev.discovering = discovering;
9826 
9827 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
9828 }
9829 
9830 void mgmt_suspending(struct hci_dev *hdev, u8 state)
9831 {
9832 	struct mgmt_ev_controller_suspend ev;
9833 
9834 	ev.suspend_state = state;
9835 	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
9836 }
9837 
9838 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
9839 		   u8 addr_type)
9840 {
9841 	struct mgmt_ev_controller_resume ev;
9842 
9843 	ev.wake_reason = reason;
9844 	if (bdaddr) {
9845 		bacpy(&ev.addr.bdaddr, bdaddr);
9846 		ev.addr.type = addr_type;
9847 	} else {
9848 		memset(&ev.addr, 0, sizeof(ev.addr));
9849 	}
9850 
9851 	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
9852 }
9853 
/* The mgmt interface is exposed to user space as the HCI control
 * channel; mgmt_handlers maps each opcode to its handler above.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
9860 
/* Register the mgmt control channel with the HCI socket layer.
 * Returns 0 on success or a negative error code.
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
9865 
/* Unregister the mgmt control channel on subsystem shutdown. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
9870