xref: /linux/net/bluetooth/mgmt.c (revision 41fb0cf1bced59c1fe178cf6cc9f716b5da9e40e)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 #include "eir.h"
42 #include "aosp.h"
43 
/* Version of the Management interface implemented here; reported to
 * user space via MGMT_OP_READ_VERSION (see mgmt_fill_version_info()).
 */
#define MGMT_VERSION	1
#define MGMT_REVISION	21
46 
/* Full set of management opcodes available to trusted sockets; reported
 * by MGMT_OP_READ_COMMANDS (see read_commands() below).
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
};
133 
/* Full set of management events delivered to trusted sockets; reported
 * by MGMT_OP_READ_COMMANDS (see read_commands() below).
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
};
178 
/* Read-only subset of opcodes allowed for sockets without the
 * HCI_SOCK_TRUSTED flag (see read_commands() below).
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
191 
/* Subset of events delivered to sockets without the HCI_SOCK_TRUSTED
 * flag (see read_commands() below).
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
206 
/* 2 second timeout in jiffies; NOTE(review): presumably used to expire
 * the service cache (see service_cache_off() below) -- confirm against
 * the code that schedules hdev->service_cache, which is outside this
 * view.
 */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* 16 bytes of zeros, i.e. an all-zero (invalid/absent) key value */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
211 
/* HCI to MGMT error code conversion table.
 *
 * Indexed directly by the HCI status code; mgmt_status() bounds-checks
 * against ARRAY_SIZE() and falls back to MGMT_STATUS_FAILED for codes
 * beyond the end of this table.
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
279 
280 static u8 mgmt_errno_status(int err)
281 {
282 	switch (err) {
283 	case 0:
284 		return MGMT_STATUS_SUCCESS;
285 	case -EPERM:
286 		return MGMT_STATUS_REJECTED;
287 	case -EINVAL:
288 		return MGMT_STATUS_INVALID_PARAMS;
289 	case -EOPNOTSUPP:
290 		return MGMT_STATUS_NOT_SUPPORTED;
291 	case -EBUSY:
292 		return MGMT_STATUS_BUSY;
293 	case -ETIMEDOUT:
294 		return MGMT_STATUS_AUTH_FAILED;
295 	case -ENOMEM:
296 		return MGMT_STATUS_NO_RESOURCES;
297 	case -EISCONN:
298 		return MGMT_STATUS_ALREADY_CONNECTED;
299 	case -ENOTCONN:
300 		return MGMT_STATUS_DISCONNECTED;
301 	}
302 
303 	return MGMT_STATUS_FAILED;
304 }
305 
306 static u8 mgmt_status(int err)
307 {
308 	if (err < 0)
309 		return mgmt_errno_status(err);
310 
311 	if (err < ARRAY_SIZE(mgmt_status_table))
312 		return mgmt_status_table[err];
313 
314 	return MGMT_STATUS_FAILED;
315 }
316 
/* Send an index-related event on the control channel to all sockets
 * matching @flag; no socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
323 
/* Send an event on the control channel to sockets matching @flag,
 * excluding @skip_sk (typically the originator of the change).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
330 
/* Send an event on the control channel to trusted sockets only,
 * excluding @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
337 
338 static u8 le_addr_type(u8 mgmt_addr_type)
339 {
340 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
341 		return ADDR_LE_DEV_PUBLIC;
342 	else
343 		return ADDR_LE_DEV_RANDOM;
344 }
345 
/* Fill a mgmt_rp_read_version reply with the interface version and
 * revision implemented by this file. @ver must point to a buffer at
 * least sizeof(struct mgmt_rp_read_version) bytes long.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
353 
/* MGMT_OP_READ_VERSION handler: reply with the interface version and
 * revision. Not tied to a controller (MGMT_INDEX_NONE).
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
366 
367 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
368 			 u16 data_len)
369 {
370 	struct mgmt_rp_read_commands *rp;
371 	u16 num_commands, num_events;
372 	size_t rp_size;
373 	int i, err;
374 
375 	bt_dev_dbg(hdev, "sock %p", sk);
376 
377 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
378 		num_commands = ARRAY_SIZE(mgmt_commands);
379 		num_events = ARRAY_SIZE(mgmt_events);
380 	} else {
381 		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
382 		num_events = ARRAY_SIZE(mgmt_untrusted_events);
383 	}
384 
385 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
386 
387 	rp = kmalloc(rp_size, GFP_KERNEL);
388 	if (!rp)
389 		return -ENOMEM;
390 
391 	rp->num_commands = cpu_to_le16(num_commands);
392 	rp->num_events = cpu_to_le16(num_events);
393 
394 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
395 		__le16 *opcode = rp->opcodes;
396 
397 		for (i = 0; i < num_commands; i++, opcode++)
398 			put_unaligned_le16(mgmt_commands[i], opcode);
399 
400 		for (i = 0; i < num_events; i++, opcode++)
401 			put_unaligned_le16(mgmt_events[i], opcode);
402 	} else {
403 		__le16 *opcode = rp->opcodes;
404 
405 		for (i = 0; i < num_commands; i++, opcode++)
406 			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
407 
408 		for (i = 0; i < num_events; i++, opcode++)
409 			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
410 	}
411 
412 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
413 				rp, rp_size);
414 	kfree(rp);
415 
416 	return err;
417 }
418 
/* MGMT_OP_READ_INDEX_LIST handler: reply with the ids of all configured
 * HCI_PRIMARY controllers.
 *
 * Built in two passes under hci_dev_list_lock: the first pass sizes the
 * reply buffer, the second fills it while additionally skipping devices
 * in SETUP/CONFIG/USER_CHANNEL state or marked raw-only, so the final
 * count can only be less than or equal to the allocated capacity.
 * num_controllers and rp_len are set from the second pass.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	/* GFP_ATOMIC: hci_dev_list_lock is held (read_lock), so the
	 * allocation must not sleep.
	 */
	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
478 
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: like read_index_list(), but
 * reports only HCI_PRIMARY controllers that still have the
 * HCI_UNCONFIGURED flag set.
 *
 * Same two-pass scheme under hci_dev_list_lock: pass one sizes the
 * buffer, pass two fills it with extra filtering (SETUP/CONFIG/
 * USER_CHANNEL state, raw-only quirk), so the final count never
 * exceeds the allocation.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	/* GFP_ATOMIC: allocation happens with hci_dev_list_lock held */
	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
538 
/* MGMT_OP_READ_EXT_INDEX_LIST handler: reply with all primary and AMP
 * controllers, each entry carrying a type (0x00 configured primary,
 * 0x01 unconfigured primary, 0x02 AMP), the bus and the index.
 *
 * Two-pass scheme under hci_dev_list_lock as in read_index_list();
 * the extra filters in the fill pass mean count can only shrink, so
 * the struct_size() allocation is always sufficient.
 *
 * Calling this command also switches the socket from the legacy
 * (unconf) index events to extended index events.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	/* GFP_ATOMIC: allocation happens with hci_dev_list_lock held */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
612 
613 static bool is_configured(struct hci_dev *hdev)
614 {
615 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
616 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
617 		return false;
618 
619 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
620 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
621 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
622 		return false;
623 
624 	return true;
625 }
626 
627 static __le32 get_missing_options(struct hci_dev *hdev)
628 {
629 	u32 options = 0;
630 
631 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
632 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
633 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
634 
635 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
636 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
637 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
638 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
639 
640 	return cpu_to_le32(options);
641 }
642 
/* Broadcast the current missing-options bitmask as a
 * MGMT_EV_NEW_CONFIG_OPTIONS event to sockets with
 * HCI_MGMT_OPTION_EVENTS set, excluding @skip.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
650 
/* Complete @opcode towards @sk with the current missing-options
 * bitmask as the reply payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
658 
659 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
660 			    void *data, u16 data_len)
661 {
662 	struct mgmt_rp_read_config_info rp;
663 	u32 options = 0;
664 
665 	bt_dev_dbg(hdev, "sock %p", sk);
666 
667 	hci_dev_lock(hdev);
668 
669 	memset(&rp, 0, sizeof(rp));
670 	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
671 
672 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
673 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
674 
675 	if (hdev->set_bdaddr)
676 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
677 
678 	rp.supported_options = cpu_to_le32(options);
679 	rp.missing_options = get_missing_options(hdev);
680 
681 	hci_dev_unlock(hdev);
682 
683 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
684 				 &rp, sizeof(rp));
685 }
686 
/* Build the MGMT_PHY_* bitmask of PHYs this controller supports,
 * derived from its LMP features (BR/EDR side) and LE feature bits
 * (LE side). 1M BR and 1-slot packets are implied by BR/EDR support;
 * EDR 2M/3M and 3/5-slot variants are only added when the
 * corresponding LMP capabilities are present.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M is mandatory whenever LE is supported */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
738 
/* Build the MGMT_PHY_* bitmask of PHYs currently selected.
 *
 * On the BR/EDR side this is derived from hdev->pkt_type: for basic
 * rate, a DM/DH bit being SET means the packet type is enabled; for
 * EDR the 2DHx/3DHx bits have inverted polarity (a set bit means the
 * packet type is "shall not be used"), hence the negated tests below.
 * On the LE side the default TX/RX PHY preference masks are consulted.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			/* EDR bits in pkt_type are "do not use" flags */
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
801 
802 static u32 get_configurable_phys(struct hci_dev *hdev)
803 {
804 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
805 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
806 }
807 
/* Build the MGMT_SETTING_* bitmask of settings this controller can
 * support, based on its LMP/LE capabilities and quirks. This is the
 * "supported_settings" field of MGMT_OP_READ_INFO; compare with
 * get_current_settings() which reports what is currently enabled.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Always available regardless of transport capabilities */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			/* High Speed additionally needs kernel support */
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
854 
/* Build the MGMT_SETTING_* bitmask of settings currently active on
 * this controller, mostly by translating HCI dev flags one-to-one.
 * This is the "current_settings" field of MGMT_OP_READ_INFO.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	return settings;
}
925 
/* Look up a pending mgmt command for @opcode on the control channel;
 * returns NULL when none is outstanding.
 */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
930 
931 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
932 {
933 	struct mgmt_pending_cmd *cmd;
934 
935 	/* If there's a pending mgmt command the flags will not yet have
936 	 * their final values, so check for this first.
937 	 */
938 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
939 	if (cmd) {
940 		struct mgmt_mode *cp = cmd->param;
941 		if (cp->val == 0x01)
942 			return LE_AD_GENERAL;
943 		else if (cp->val == 0x02)
944 			return LE_AD_LIMITED;
945 	} else {
946 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
947 			return LE_AD_LIMITED;
948 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
949 			return LE_AD_GENERAL;
950 	}
951 
952 	return 0;
953 }
954 
955 bool mgmt_get_connectable(struct hci_dev *hdev)
956 {
957 	struct mgmt_pending_cmd *cmd;
958 
959 	/* If there's a pending mgmt command the flag will not yet have
960 	 * it's final value, so check for this first.
961 	 */
962 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
963 	if (cmd) {
964 		struct mgmt_mode *cp = cmd->param;
965 
966 		return cp->val;
967 	}
968 
969 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
970 }
971 
/* hci_cmd_sync callback: refresh the EIR data and class of device on
 * the controller after the service cache has been flushed.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
979 
/* Delayed work handler for hdev->service_cache: when the cache flag is
 * still set, clear it and queue a sync update of EIR and device class.
 * test_and_clear makes the work a no-op if the flag was already gone.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}
990 
991 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
992 {
993 	/* The generation of a new RPA and programming it into the
994 	 * controller happens in the hci_req_enable_advertising()
995 	 * function.
996 	 */
997 	if (ext_adv_capable(hdev))
998 		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
999 	else
1000 		return hci_enable_advertising_sync(hdev);
1001 }
1002 
/* Delayed work handler for hdev->rpa_expired: mark the RPA as expired
 * and, if advertising is currently enabled, queue a sync job to restart
 * it (which regenerates the RPA). Without active advertising nothing
 * needs to be done immediately.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
1017 
/* One-time per-device mgmt initialization, triggered by the first mgmt
 * command addressed to @hdev. The test_and_set on HCI_MGMT guarantees
 * the work structs are only initialized once.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
1033 
/* MGMT_OP_READ_INFO handler: fill a mgmt_rp_read_info reply with the
 * controller's address, version, manufacturer, settings masks, class of
 * device and (short) name, taken under hdev->lock.  @data/@data_len are
 * unused (the command carries no parameters).
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1063 
/* Build the EIR blob used by the extended-info reply/event: class of
 * device (if BR/EDR is enabled), appearance (if LE is enabled), and the
 * complete and short local names.  Returns the number of bytes written
 * to @eir.
 *
 * NOTE(review): @eir is not bounds-checked here; callers pass 512-byte
 * buffers, which is assumed to be large enough for class + appearance +
 * both names — confirm if name length limits ever change.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strlen(hdev->dev_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strlen(hdev->short_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1087 
/* MGMT_OP_READ_EXT_INFO handler: like read_controller_info() but with
 * class/appearance/names packed as EIR data after the fixed header.
 * Also switches this socket over to the extended-info event model (see
 * comment below).  @data/@data_len are unused.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	/* Fixed reply header plus variable-length EIR data share one
	 * stack buffer; only sizeof(*rp) + eir_len bytes are sent.
	 */
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1127 
/* Broadcast MGMT_EV_EXT_INFO_CHANGED (header + current EIR data) to all
 * sockets that opted into extended-info events, except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	/* Event header plus variable-length EIR share one stack buffer,
	 * mirroring read_ext_controller_info().
	 */
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1143 
/* Reply to @opcode on @sk with the controller's current settings
 * bitmask (little-endian) and status 0.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1151 
1152 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1153 {
1154 	struct mgmt_ev_advertising_added ev;
1155 
1156 	ev.instance = instance;
1157 
1158 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1159 }
1160 
1161 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1162 			      u8 instance)
1163 {
1164 	struct mgmt_ev_advertising_removed ev;
1165 
1166 	ev.instance = instance;
1167 
1168 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1169 }
1170 
1171 static void cancel_adv_timeout(struct hci_dev *hdev)
1172 {
1173 	if (hdev->adv_instance_timeout) {
1174 		hdev->adv_instance_timeout = 0;
1175 		cancel_delayed_work(&hdev->adv_instance_expire);
1176 	}
1177 }
1178 
1179 /* This function requires the caller holds hdev->lock */
1180 static void restart_le_actions(struct hci_dev *hdev)
1181 {
1182 	struct hci_conn_params *p;
1183 
1184 	list_for_each_entry(p, &hdev->le_conn_params, list) {
1185 		/* Needed for AUTO_OFF case where might not "really"
1186 		 * have been powered off.
1187 		 */
1188 		list_del_init(&p->action);
1189 
1190 		switch (p->auto_connect) {
1191 		case HCI_AUTO_CONN_DIRECT:
1192 		case HCI_AUTO_CONN_ALWAYS:
1193 			list_add(&p->action, &hdev->pend_le_conns);
1194 			break;
1195 		case HCI_AUTO_CONN_REPORT:
1196 			list_add(&p->action, &hdev->pend_le_reports);
1197 			break;
1198 		default:
1199 			break;
1200 		}
1201 	}
1202 }
1203 
/* Broadcast MGMT_EV_NEW_SETTINGS carrying the current settings bitmask
 * to all sockets with setting events enabled, except @skip.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1211 
/* hci_cmd_sync completion for MGMT_OP_SET_POWERED.
 *
 * On success: for power-on, re-queue LE auto-connect actions and
 * refresh passive scanning under hdev->lock, then answer the requester
 * with the new settings; NEW_SETTINGS is only broadcast for power-on
 * (see comment below).  On failure a command status is returned
 * instead.  The pending command @data is freed in all cases.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_free(cmd);
}
1241 
1242 static int set_powered_sync(struct hci_dev *hdev, void *data)
1243 {
1244 	struct mgmt_pending_cmd *cmd = data;
1245 	struct mgmt_mode *cp = cmd->param;
1246 
1247 	BT_DBG("%s", hdev->name);
1248 
1249 	return hci_set_powered_sync(hdev, cp->val);
1250 }
1251 
1252 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1253 		       u16 len)
1254 {
1255 	struct mgmt_mode *cp = data;
1256 	struct mgmt_pending_cmd *cmd;
1257 	int err;
1258 
1259 	bt_dev_dbg(hdev, "sock %p", sk);
1260 
1261 	if (cp->val != 0x00 && cp->val != 0x01)
1262 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1263 				       MGMT_STATUS_INVALID_PARAMS);
1264 
1265 	hci_dev_lock(hdev);
1266 
1267 	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1268 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1269 				      MGMT_STATUS_BUSY);
1270 		goto failed;
1271 	}
1272 
1273 	if (!!cp->val == hdev_is_powered(hdev)) {
1274 		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1275 		goto failed;
1276 	}
1277 
1278 	cmd = mgmt_pending_new(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1279 	if (!cmd) {
1280 		err = -ENOMEM;
1281 		goto failed;
1282 	}
1283 
1284 	err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
1285 				 mgmt_set_powered_complete);
1286 
1287 failed:
1288 	hci_dev_unlock(hdev);
1289 	return err;
1290 }
1291 
/* Public wrapper: broadcast the current settings to every listener. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1296 
/* Iteration context for mgmt_pending_foreach() callbacks: @sk collects
 * the first requester socket seen (with a held reference, so that
 * socket can be skipped when broadcasting afterwards), @hdev is the
 * device being processed and @mgmt_status carries a status code for
 * callbacks that need one.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1302 
/* mgmt_pending_foreach() callback: answer @cmd with the current
 * settings, unlink and free it, and stash the first requester socket in
 * the cmd_lookup passed via @data so the caller can skip it when
 * broadcasting NEW_SETTINGS.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		/* Reference held here; the caller drops it via sock_put(). */
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1318 
/* mgmt_pending_foreach() callback: fail @cmd with the status pointed to
 * by @data and remove it from the pending list.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1326 
1327 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1328 {
1329 	if (cmd->cmd_complete) {
1330 		u8 *status = data;
1331 
1332 		cmd->cmd_complete(cmd, *status);
1333 		mgmt_pending_remove(cmd);
1334 
1335 		return;
1336 	}
1337 
1338 	cmd_status_rsp(cmd, data);
1339 }
1340 
/* Generic cmd_complete handler: echo the command's stored parameters
 * back in the response together with @status.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1346 
/* cmd_complete handler for address-based commands: respond with only
 * the mgmt_addr_info prefix of the stored parameters.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1352 
1353 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1354 {
1355 	if (!lmp_bredr_capable(hdev))
1356 		return MGMT_STATUS_NOT_SUPPORTED;
1357 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1358 		return MGMT_STATUS_REJECTED;
1359 	else
1360 		return MGMT_STATUS_SUCCESS;
1361 }
1362 
1363 static u8 mgmt_le_support(struct hci_dev *hdev)
1364 {
1365 	if (!lmp_le_capable(hdev))
1366 		return MGMT_STATUS_NOT_SUPPORTED;
1367 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1368 		return MGMT_STATUS_REJECTED;
1369 	else
1370 		return MGMT_STATUS_SUCCESS;
1371 }
1372 
/* hci_cmd_sync completion for MGMT_OP_SET_DISCOVERABLE.
 *
 * On error the requester gets a command status and the limited
 * discoverable flag is cleared again; on success the discoverable
 * timeout (if configured) is armed, the requester gets the updated
 * settings and NEW_SETTINGS is broadcast.  The pending command @data is
 * freed in all cases.
 */
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	/* Arm the auto-disable timer when discoverable mode is active
	 * and a timeout has been configured.
	 */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
	hci_dev_unlock(hdev);
}
1402 
/* hci_cmd_sync callback for MGMT_OP_SET_DISCOVERABLE: push the
 * discoverable state (already stored in the hdev flags by the caller)
 * to the controller.  @data is unused.
 */
static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}
1409 
1410 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1411 			    u16 len)
1412 {
1413 	struct mgmt_cp_set_discoverable *cp = data;
1414 	struct mgmt_pending_cmd *cmd;
1415 	u16 timeout;
1416 	int err;
1417 
1418 	bt_dev_dbg(hdev, "sock %p", sk);
1419 
1420 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1421 	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1422 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1423 				       MGMT_STATUS_REJECTED);
1424 
1425 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1426 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1427 				       MGMT_STATUS_INVALID_PARAMS);
1428 
1429 	timeout = __le16_to_cpu(cp->timeout);
1430 
1431 	/* Disabling discoverable requires that no timeout is set,
1432 	 * and enabling limited discoverable requires a timeout.
1433 	 */
1434 	if ((cp->val == 0x00 && timeout > 0) ||
1435 	    (cp->val == 0x02 && timeout == 0))
1436 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1437 				       MGMT_STATUS_INVALID_PARAMS);
1438 
1439 	hci_dev_lock(hdev);
1440 
1441 	if (!hdev_is_powered(hdev) && timeout > 0) {
1442 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1443 				      MGMT_STATUS_NOT_POWERED);
1444 		goto failed;
1445 	}
1446 
1447 	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1448 	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1449 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1450 				      MGMT_STATUS_BUSY);
1451 		goto failed;
1452 	}
1453 
1454 	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1455 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1456 				      MGMT_STATUS_REJECTED);
1457 		goto failed;
1458 	}
1459 
1460 	if (hdev->advertising_paused) {
1461 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1462 				      MGMT_STATUS_BUSY);
1463 		goto failed;
1464 	}
1465 
1466 	if (!hdev_is_powered(hdev)) {
1467 		bool changed = false;
1468 
1469 		/* Setting limited discoverable when powered off is
1470 		 * not a valid operation since it requires a timeout
1471 		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1472 		 */
1473 		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1474 			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1475 			changed = true;
1476 		}
1477 
1478 		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1479 		if (err < 0)
1480 			goto failed;
1481 
1482 		if (changed)
1483 			err = new_settings(hdev, sk);
1484 
1485 		goto failed;
1486 	}
1487 
1488 	/* If the current mode is the same, then just update the timeout
1489 	 * value with the new value. And if only the timeout gets updated,
1490 	 * then no need for any HCI transactions.
1491 	 */
1492 	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1493 	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
1494 						   HCI_LIMITED_DISCOVERABLE)) {
1495 		cancel_delayed_work(&hdev->discov_off);
1496 		hdev->discov_timeout = timeout;
1497 
1498 		if (cp->val && hdev->discov_timeout > 0) {
1499 			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1500 			queue_delayed_work(hdev->req_workqueue,
1501 					   &hdev->discov_off, to);
1502 		}
1503 
1504 		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1505 		goto failed;
1506 	}
1507 
1508 	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1509 	if (!cmd) {
1510 		err = -ENOMEM;
1511 		goto failed;
1512 	}
1513 
1514 	/* Cancel any potential discoverable timeout that might be
1515 	 * still active and store new timeout value. The arming of
1516 	 * the timeout happens in the complete handler.
1517 	 */
1518 	cancel_delayed_work(&hdev->discov_off);
1519 	hdev->discov_timeout = timeout;
1520 
1521 	if (cp->val)
1522 		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1523 	else
1524 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1525 
1526 	/* Limited discoverable mode */
1527 	if (cp->val == 0x02)
1528 		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1529 	else
1530 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1531 
1532 	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1533 				 mgmt_set_discoverable_complete);
1534 
1535 failed:
1536 	hci_dev_unlock(hdev);
1537 	return err;
1538 }
1539 
/* hci_cmd_sync completion for MGMT_OP_SET_CONNECTABLE: report either a
 * command status (on error) or the updated settings plus a NEW_SETTINGS
 * broadcast (on success).  The pending command @data is freed in all
 * cases.
 */
static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
					  int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
	hci_dev_unlock(hdev);
}
1562 
/* Apply a SET_CONNECTABLE request while the controller is powered off:
 * only the flags are touched (disabling connectable also clears
 * discoverable), the reply is sent, and when the value actually changed
 * the scan state is refreshed and NEW_SETTINGS broadcast.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Non-connectable implies non-discoverable. */
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_req_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1591 
/* hci_cmd_sync callback for MGMT_OP_SET_CONNECTABLE: push the
 * connectable state (already stored in the hdev flags by the caller)
 * to the controller.  @data is unused.
 */
static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}
1598 
1599 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1600 			   u16 len)
1601 {
1602 	struct mgmt_mode *cp = data;
1603 	struct mgmt_pending_cmd *cmd;
1604 	int err;
1605 
1606 	bt_dev_dbg(hdev, "sock %p", sk);
1607 
1608 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1609 	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1610 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1611 				       MGMT_STATUS_REJECTED);
1612 
1613 	if (cp->val != 0x00 && cp->val != 0x01)
1614 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1615 				       MGMT_STATUS_INVALID_PARAMS);
1616 
1617 	hci_dev_lock(hdev);
1618 
1619 	if (!hdev_is_powered(hdev)) {
1620 		err = set_connectable_update_settings(hdev, sk, cp->val);
1621 		goto failed;
1622 	}
1623 
1624 	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1625 	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1626 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1627 				      MGMT_STATUS_BUSY);
1628 		goto failed;
1629 	}
1630 
1631 	cmd = mgmt_pending_new(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1632 	if (!cmd) {
1633 		err = -ENOMEM;
1634 		goto failed;
1635 	}
1636 
1637 	if (cp->val) {
1638 		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1639 	} else {
1640 		if (hdev->discov_timeout > 0)
1641 			cancel_delayed_work(&hdev->discov_off);
1642 
1643 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1644 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1645 		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1646 	}
1647 
1648 	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1649 				 mgmt_set_connectable_complete);
1650 
1651 failed:
1652 	hci_dev_unlock(hdev);
1653 	return err;
1654 }
1655 
/* MGMT_OP_SET_BONDABLE handler: toggle the HCI_BONDABLE flag.  This is
 * a pure host-side setting, so no HCI command is needed; on an actual
 * change the discoverable state is refreshed (the advertising address
 * may depend on bondable in limited privacy mode) and NEW_SETTINGS is
 * broadcast.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1693 
/* MGMT_OP_SET_LINK_SECURITY handler: enable/disable BR/EDR link-level
 * security (authentication).  While powered off only the
 * HCI_LINK_SECURITY flag is toggled; when powered, the legacy
 * HCI_OP_WRITE_AUTH_ENABLE command is sent directly and the reply is
 * delivered from its command-complete handling via the pending command
 * added here.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: no HCI
	 * round-trip needed, just report the settings.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1762 
/* hci_cmd_sync completion for MGMT_OP_SET_SSP.
 *
 * On failure: roll back HCI_SSP_ENABLED (and HCI_HS_ENABLED) if this
 * request had enabled it, broadcast the rollback, and fail all pending
 * SET_SSP commands.  On success: update the SSP/HS flags to their final
 * values, answer all pending SET_SSP commands with the settings,
 * broadcast NEW_SETTINGS when anything changed, and refresh the EIR
 * data (the SSP bit affects it).
 */
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			/* High Speed depends on SSP, so it goes too. */
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);

		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}
1807 
/* hci_cmd_sync callback for MGMT_OP_SET_SSP: write the requested SSP
 * mode to the controller.  When enabling, HCI_SSP_ENABLED is set
 * optimistically first, then cleared again after a successful write if
 * this call was the one that set it.
 *
 * NOTE(review): clearing on success (!err) rather than on failure looks
 * inverted for a rollback pattern; it appears to exist so that
 * set_ssp_complete()'s test_and_set observes a transition — confirm
 * against that function before changing the condition.
 */
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
1825 
/* MGMT_OP_SET_SSP handler: enable/disable Secure Simple Pairing.
 * Requires a BR/EDR capable and enabled, SSP capable controller.  While
 * powered off only the flags are toggled (disabling SSP also drops High
 * Speed); when powered, the change is queued to the hci_cmd_sync
 * machinery and the reply comes from set_ssp_complete().
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just report the settings. */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1905 
/* MGMT_OP_SET_HS handler: enable/disable High Speed (AMP) support.
 * Purely a host-side flag — no HCI command is issued — but it requires
 * CONFIG_BT_HS, an SSP-capable controller with SSP enabled, and no
 * SET_SSP command in flight.  Disabling is rejected while powered on.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A pending SET_SSP could still flip the SSP state HS depends on. */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1966 
/* hci_cmd_sync completion for MGMT_OP_SET_LE: on error fail all pending
 * SET_LE commands; on success answer them with the current settings and
 * broadcast NEW_SETTINGS (skipping the first requester's socket, which
 * already received a reply).  @data is unused — all pending SET_LE
 * commands are resolved through the foreach helpers.
 */
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
							&status);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
1987 
/* hci_cmd_sync callback for MGMT_OP_SET_LE.
 *
 * Disabling: stop advertising and remove the default extended
 * advertising instance first.  Enabling: set HCI_LE_ENABLED before the
 * write so the post-write update below sees it.  Then write the LE host
 * support setting and, if LE ended up enabled, refresh advertising
 * data, scan response data and passive scanning.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
2029 
/* MGMT_OP_SET_LE handler: enable/disable Low Energy support.  LE-only
 * controllers cannot have LE switched off (see comment below).  While
 * powered off, or when the host LE state already matches, only the
 * flags are toggled; otherwise the change is queued to the hci_cmd_sync
 * machinery and resolved in set_le_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Disabling LE also drops all advertising instances. */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2121 
2122 /* This is a helper function to test for pending mgmt commands that can
2123  * cause CoD or EIR HCI commands. We can only allow one such pending
2124  * mgmt command at a time since otherwise we cannot easily track what
2125  * the current values are, will be, and based on that calculate if a new
2126  * HCI command needs to be sent and if yes with what value.
2127  */
2128 static bool pending_eir_or_class(struct hci_dev *hdev)
2129 {
2130 	struct mgmt_pending_cmd *cmd;
2131 
2132 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2133 		switch (cmd->opcode) {
2134 		case MGMT_OP_ADD_UUID:
2135 		case MGMT_OP_REMOVE_UUID:
2136 		case MGMT_OP_SET_DEV_CLASS:
2137 		case MGMT_OP_SET_POWERED:
2138 			return true;
2139 		}
2140 	}
2141 
2142 	return false;
2143 }
2144 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order. Shortened 16/32-bit UUIDs are aliases of
 * this value differing only in bytes 12-15 (see get_uuid_size()).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2149 
2150 static u8 get_uuid_size(const u8 *uuid)
2151 {
2152 	u32 val;
2153 
2154 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2155 		return 128;
2156 
2157 	val = get_unaligned_le32(&uuid[12]);
2158 	if (val > 0xffff)
2159 		return 32;
2160 
2161 	return 16;
2162 }
2163 
/* Shared completion callback for the UUID/class mgmt commands queued via
 * hci_cmd_sync_queue(): reports the current 3-byte Class of Device back
 * to the originating socket and frees the pending command.
 */
static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* The response payload is always the current dev_class. */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(err), hdev->dev_class, 3);

	mgmt_pending_free(cmd);
}
2175 
/* cmd_sync worker for Add UUID: refresh Class of Device first, then the
 * EIR data, stopping at the first failure.
 */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2186 
/* MGMT_OP_ADD_UUID handler: record a new service UUID for the adapter
 * and schedule the resulting Class of Device and EIR updates. The mgmt
 * response is sent from mgmt_class_complete() once add_uuid_sync() has
 * run.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time,
	 * see pending_eir_or_class().
	 */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

	/* Success falls through with err == 0; the completion callback
	 * owns the reply from here on.
	 */
failed:
	hci_dev_unlock(hdev);
	return err;
}
2232 
2233 static bool enable_service_cache(struct hci_dev *hdev)
2234 {
2235 	if (!hdev_is_powered(hdev))
2236 		return false;
2237 
2238 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2239 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2240 				   CACHE_TIMEOUT);
2241 		return true;
2242 	}
2243 
2244 	return false;
2245 }
2246 
/* cmd_sync worker for Remove UUID: push the updated Class of Device to
 * the controller, then refresh the EIR data.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int status = hci_update_class_sync(hdev);

	if (status)
		return status;

	return hci_update_eir_sync(hdev);
}
2257 
/* MGMT_OP_REMOVE_UUID handler. An all-zero UUID acts as a wildcard that
 * clears every stored UUID; otherwise all entries matching the given
 * UUID are deleted. Class of Device and EIR updates are queued on the
 * cmd_sync context and answered from mgmt_class_complete().
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* Wildcard value: sixteen zero bytes selects "remove all". */
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time. */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache timer could be armed, defer the
		 * actual controller update and answer immediately.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2323 
2324 static int set_class_sync(struct hci_dev *hdev, void *data)
2325 {
2326 	int err = 0;
2327 
2328 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2329 		cancel_delayed_work_sync(&hdev->service_cache);
2330 		err = hci_update_eir_sync(hdev);
2331 	}
2332 
2333 	if (err)
2334 		return err;
2335 
2336 	return hci_update_class_sync(hdev);
2337 }
2338 
/* MGMT_OP_SET_DEV_CLASS handler: validate and store the new major/minor
 * class. The controller is only updated when powered; otherwise the
 * values are kept for the next power-on and the reply is immediate.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Class of Device only exists for BR/EDR capable controllers. */
	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time. */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The two low bits of minor and three high bits of major are
	 * reserved and must be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2390 
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the adapter's stored BR/EDR
 * link keys with the list supplied by userspace (typically at daemon
 * startup). All parameters are validated before any state is touched so
 * a bad request never clears existing keys.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound that keeps struct_size() below U16_MAX so the
	 * expected_len computation cannot overflow.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The packet length must match the claimed key count exactly. */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate every entry up front: BR/EDR addresses only and key
	 * types within the range defined by the HCI specification.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Keys on the administratively blocked list are never
		 * loaded.
		 */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2479 
2480 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2481 			   u8 addr_type, struct sock *skip_sk)
2482 {
2483 	struct mgmt_ev_device_unpaired ev;
2484 
2485 	bacpy(&ev.addr.bdaddr, bdaddr);
2486 	ev.addr.type = addr_type;
2487 
2488 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2489 			  skip_sk);
2490 }
2491 
/* MGMT_OP_UNPAIR_DEVICE handler: delete the pairing information for a
 * device (link key for BR/EDR, LTK/IRK via SMP for LE) and optionally
 * terminate an existing connection. When a disconnect is required the
 * reply is deferred until the link teardown completes.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Without an active connection the parameters can be dropped
	 * right away.
	 */
	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* A disconnect is needed: defer the reply until the abort
	 * completes (addr_cmd_complete sends it).
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2619 
/* MGMT_OP_DISCONNECT handler: initiate disconnection of the ACL or LE
 * link to the given address. The reply is deferred via a pending
 * command and sent once the disconnect completes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be in flight at a time. */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED connections have no link to tear down. */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2685 
2686 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2687 {
2688 	switch (link_type) {
2689 	case LE_LINK:
2690 		switch (addr_type) {
2691 		case ADDR_LE_DEV_PUBLIC:
2692 			return BDADDR_LE_PUBLIC;
2693 
2694 		default:
2695 			/* Fallback to LE Random address type */
2696 			return BDADDR_LE_RANDOM;
2697 		}
2698 
2699 	default:
2700 		/* Fallback to BR/EDR type */
2701 		return BDADDR_BREDR;
2702 	}
2703 }
2704 
2705 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2706 			   u16 data_len)
2707 {
2708 	struct mgmt_rp_get_connections *rp;
2709 	struct hci_conn *c;
2710 	int err;
2711 	u16 i;
2712 
2713 	bt_dev_dbg(hdev, "sock %p", sk);
2714 
2715 	hci_dev_lock(hdev);
2716 
2717 	if (!hdev_is_powered(hdev)) {
2718 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2719 				      MGMT_STATUS_NOT_POWERED);
2720 		goto unlock;
2721 	}
2722 
2723 	i = 0;
2724 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2725 		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2726 			i++;
2727 	}
2728 
2729 	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
2730 	if (!rp) {
2731 		err = -ENOMEM;
2732 		goto unlock;
2733 	}
2734 
2735 	i = 0;
2736 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2737 		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2738 			continue;
2739 		bacpy(&rp->addr[i].bdaddr, &c->dst);
2740 		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2741 		if (c->type == SCO_LINK || c->type == ESCO_LINK)
2742 			continue;
2743 		i++;
2744 	}
2745 
2746 	rp->conn_count = cpu_to_le16(i);
2747 
2748 	/* Recalculate length in case of filtered SCO connections, etc */
2749 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2750 				struct_size(rp, addr, i));
2751 
2752 	kfree(rp);
2753 
2754 unlock:
2755 	hci_dev_unlock(hdev);
2756 	return err;
2757 }
2758 
/* Queue an HCI PIN Code Negative Reply for @cp->addr and track it as a
 * pending MGMT_OP_PIN_CODE_NEG_REPLY command; the mgmt reply is sent by
 * addr_cmd_complete() when the HCI command completes.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	/* The HCI command payload is just the remote bdaddr. */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2779 
/* MGMT_OP_PIN_CODE_REPLY handler: forward a userspace-supplied PIN code
 * to the controller for an ongoing BR/EDR pairing. A PIN shorter than
 * 16 bytes is rejected (and negatively replied to the remote) when the
 * pending security level demands a 16-digit PIN.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN; otherwise reject
	 * the pairing towards the remote and fail the mgmt command.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2841 
2842 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2843 			     u16 len)
2844 {
2845 	struct mgmt_cp_set_io_capability *cp = data;
2846 
2847 	bt_dev_dbg(hdev, "sock %p", sk);
2848 
2849 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2850 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2851 				       MGMT_STATUS_INVALID_PARAMS);
2852 
2853 	hci_dev_lock(hdev);
2854 
2855 	hdev->io_capability = cp->io_capability;
2856 
2857 	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2858 
2859 	hci_dev_unlock(hdev);
2860 
2861 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
2862 				 NULL, 0);
2863 }
2864 
2865 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2866 {
2867 	struct hci_dev *hdev = conn->hdev;
2868 	struct mgmt_pending_cmd *cmd;
2869 
2870 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2871 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2872 			continue;
2873 
2874 		if (cmd->user_data != conn)
2875 			continue;
2876 
2877 		return cmd;
2878 	}
2879 
2880 	return NULL;
2881 }
2882 
/* Finish a Pair Device command: send the mgmt response, detach the
 * pairing callbacks from the connection and release the references the
 * command was holding on it.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Balances the hci_conn_get() taken when cmd->user_data was set
	 * in pair_device().
	 */
	hci_conn_put(conn);

	return err;
}
2911 
2912 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2913 {
2914 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2915 	struct mgmt_pending_cmd *cmd;
2916 
2917 	cmd = find_pairing(conn);
2918 	if (cmd) {
2919 		cmd->cmd_complete(cmd, status);
2920 		mgmt_pending_remove(cmd);
2921 	}
2922 }
2923 
2924 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2925 {
2926 	struct mgmt_pending_cmd *cmd;
2927 
2928 	BT_DBG("status %u", status);
2929 
2930 	cmd = find_pairing(conn);
2931 	if (!cmd) {
2932 		BT_DBG("Unable to find a pending command");
2933 		return;
2934 	}
2935 
2936 	cmd->cmd_complete(cmd, mgmt_status(status));
2937 	mgmt_pending_remove(cmd);
2938 }
2939 
/* LE connect/security/disconnect callback used while pairing. Only
 * failures are handled here; for LE, success is signalled separately
 * through mgmt_smp_complete() once SMP has actually finished.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2958 
2959 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2960 		       u16 len)
2961 {
2962 	struct mgmt_cp_pair_device *cp = data;
2963 	struct mgmt_rp_pair_device rp;
2964 	struct mgmt_pending_cmd *cmd;
2965 	u8 sec_level, auth_type;
2966 	struct hci_conn *conn;
2967 	int err;
2968 
2969 	bt_dev_dbg(hdev, "sock %p", sk);
2970 
2971 	memset(&rp, 0, sizeof(rp));
2972 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2973 	rp.addr.type = cp->addr.type;
2974 
2975 	if (!bdaddr_type_is_valid(cp->addr.type))
2976 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2977 					 MGMT_STATUS_INVALID_PARAMS,
2978 					 &rp, sizeof(rp));
2979 
2980 	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2981 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2982 					 MGMT_STATUS_INVALID_PARAMS,
2983 					 &rp, sizeof(rp));
2984 
2985 	hci_dev_lock(hdev);
2986 
2987 	if (!hdev_is_powered(hdev)) {
2988 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2989 					MGMT_STATUS_NOT_POWERED, &rp,
2990 					sizeof(rp));
2991 		goto unlock;
2992 	}
2993 
2994 	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2995 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2996 					MGMT_STATUS_ALREADY_PAIRED, &rp,
2997 					sizeof(rp));
2998 		goto unlock;
2999 	}
3000 
3001 	sec_level = BT_SECURITY_MEDIUM;
3002 	auth_type = HCI_AT_DEDICATED_BONDING;
3003 
3004 	if (cp->addr.type == BDADDR_BREDR) {
3005 		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3006 				       auth_type, CONN_REASON_PAIR_DEVICE);
3007 	} else {
3008 		u8 addr_type = le_addr_type(cp->addr.type);
3009 		struct hci_conn_params *p;
3010 
3011 		/* When pairing a new device, it is expected to remember
3012 		 * this device for future connections. Adding the connection
3013 		 * parameter information ahead of time allows tracking
3014 		 * of the peripheral preferred values and will speed up any
3015 		 * further connection establishment.
3016 		 *
3017 		 * If connection parameters already exist, then they
3018 		 * will be kept and this function does nothing.
3019 		 */
3020 		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3021 
3022 		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3023 			p->auto_connect = HCI_AUTO_CONN_DISABLED;
3024 
3025 		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3026 					   sec_level, HCI_LE_CONN_TIMEOUT,
3027 					   CONN_REASON_PAIR_DEVICE);
3028 	}
3029 
3030 	if (IS_ERR(conn)) {
3031 		int status;
3032 
3033 		if (PTR_ERR(conn) == -EBUSY)
3034 			status = MGMT_STATUS_BUSY;
3035 		else if (PTR_ERR(conn) == -EOPNOTSUPP)
3036 			status = MGMT_STATUS_NOT_SUPPORTED;
3037 		else if (PTR_ERR(conn) == -ECONNREFUSED)
3038 			status = MGMT_STATUS_REJECTED;
3039 		else
3040 			status = MGMT_STATUS_CONNECT_FAILED;
3041 
3042 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3043 					status, &rp, sizeof(rp));
3044 		goto unlock;
3045 	}
3046 
3047 	if (conn->connect_cfm_cb) {
3048 		hci_conn_drop(conn);
3049 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3050 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
3051 		goto unlock;
3052 	}
3053 
3054 	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3055 	if (!cmd) {
3056 		err = -ENOMEM;
3057 		hci_conn_drop(conn);
3058 		goto unlock;
3059 	}
3060 
3061 	cmd->cmd_complete = pairing_complete;
3062 
3063 	/* For LE, just connecting isn't a proof that the pairing finished */
3064 	if (cp->addr.type == BDADDR_BREDR) {
3065 		conn->connect_cfm_cb = pairing_complete_cb;
3066 		conn->security_cfm_cb = pairing_complete_cb;
3067 		conn->disconn_cfm_cb = pairing_complete_cb;
3068 	} else {
3069 		conn->connect_cfm_cb = le_pairing_complete_cb;
3070 		conn->security_cfm_cb = le_pairing_complete_cb;
3071 		conn->disconn_cfm_cb = le_pairing_complete_cb;
3072 	}
3073 
3074 	conn->io_capability = cp->io_cap;
3075 	cmd->user_data = hci_conn_get(conn);
3076 
3077 	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3078 	    hci_conn_security(conn, sec_level, auth_type, true)) {
3079 		cmd->cmd_complete(cmd, 0);
3080 		mgmt_pending_remove(cmd);
3081 	}
3082 
3083 	err = 0;
3084 
3085 unlock:
3086 	hci_dev_unlock(hdev);
3087 	return err;
3088 }
3089 
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort the pending Pair Device
 * command for the given address, remove any keys created so far and
 * tear down the link if it was established for the pairing.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* user_data holds the connection reference taken by pair_device(). */
	conn = cmd->user_data;

	/* The cancel must target the device currently being paired. */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3146 
/* Common backend for all user pairing responses (PIN neg reply, user
 * confirm/passkey pos/neg replies). For LE links the response is routed
 * through SMP and answered immediately; for BR/EDR the matching HCI
 * command (@hci_op) is sent and the mgmt reply deferred until it
 * completes. @passkey is only used for HCI_OP_USER_PASSKEY_REPLY.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses go to SMP and can be answered right away. */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3217 
3218 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3219 			      void *data, u16 len)
3220 {
3221 	struct mgmt_cp_pin_code_neg_reply *cp = data;
3222 
3223 	bt_dev_dbg(hdev, "sock %p", sk);
3224 
3225 	return user_pairing_resp(sk, hdev, &cp->addr,
3226 				MGMT_OP_PIN_CODE_NEG_REPLY,
3227 				HCI_OP_PIN_CODE_NEG_REPLY, 0);
3228 }
3229 
3230 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3231 			      u16 len)
3232 {
3233 	struct mgmt_cp_user_confirm_reply *cp = data;
3234 
3235 	bt_dev_dbg(hdev, "sock %p", sk);
3236 
3237 	if (len != sizeof(*cp))
3238 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3239 				       MGMT_STATUS_INVALID_PARAMS);
3240 
3241 	return user_pairing_resp(sk, hdev, &cp->addr,
3242 				 MGMT_OP_USER_CONFIRM_REPLY,
3243 				 HCI_OP_USER_CONFIRM_REPLY, 0);
3244 }
3245 
3246 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3247 				  void *data, u16 len)
3248 {
3249 	struct mgmt_cp_user_confirm_neg_reply *cp = data;
3250 
3251 	bt_dev_dbg(hdev, "sock %p", sk);
3252 
3253 	return user_pairing_resp(sk, hdev, &cp->addr,
3254 				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3255 				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3256 }
3257 
3258 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3259 			      u16 len)
3260 {
3261 	struct mgmt_cp_user_passkey_reply *cp = data;
3262 
3263 	bt_dev_dbg(hdev, "sock %p", sk);
3264 
3265 	return user_pairing_resp(sk, hdev, &cp->addr,
3266 				 MGMT_OP_USER_PASSKEY_REPLY,
3267 				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3268 }
3269 
3270 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3271 				  void *data, u16 len)
3272 {
3273 	struct mgmt_cp_user_passkey_neg_reply *cp = data;
3274 
3275 	bt_dev_dbg(hdev, "sock %p", sk);
3276 
3277 	return user_pairing_resp(sk, hdev, &cp->addr,
3278 				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3279 				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3280 }
3281 
/* Expire the currently active advertising instance if it carries any of
 * the given MGMT_ADV_FLAG_* bits (e.g. local name or appearance), so
 * that stale data stops being advertised after the underlying value
 * changed.  Runs in hci_cmd_sync context.
 *
 * Always returns 0: "no current instance", "instance unaffected" and
 * "no next instance to rotate to" are all non-error outcomes.
 */
static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return 0;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return 0;

	cancel_adv_timeout(hdev);

	/* Move on to the next instance in the rotation; if there is none,
	 * advertising of the expired data simply stops.
	 */
	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return 0;

	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);

	return 0;
}
3304 
/* hci_cmd_sync callback: expire any advertising instance that includes
 * the local name, queued after the name was changed.
 */
static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}
3309 
/* Completion callback for the Set Local Name command queued via
 * hci_cmd_sync_queue() in set_local_name().
 *
 * On failure a command status is sent to the requesting socket; on
 * success the original parameters are echoed back in the command
 * complete and, if LE advertising is active, a sync task is queued to
 * expire advertising instances that include the local name.  The
 * pending command is removed in all cases.
 */
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}
3331 
/* hci_cmd_sync work for Set Local Name: push the new name to the
 * controller.  On BR/EDR the name goes into the controller's name
 * register and the EIR data; on LE it lives in the scan response data,
 * which is only refreshed here when advertising is enabled.
 */
static int set_name_sync(struct hci_dev *hdev, void *data)
{
	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}
3347 
/* MGMT_OP_SET_LOCAL_NAME handler.
 *
 * Three cases, all exiting through the "failed" label (which, despite
 * its name, is also reached on success and merely drops the dev lock):
 *  - names unchanged: immediate command complete, nothing to do;
 *  - controller powered off: store the names, reply and emit the
 *    Local Name Changed event directly (no HCI traffic possible);
 *  - controller powered on: queue set_name_sync() and defer the reply
 *    to set_name_complete().
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* The short name never goes to the controller; it only affects
	 * EIR/advertising data, so it can be stored unconditionally.
	 */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	/* Stored only after the sync work was queued successfully, so a
	 * failed queueing leaves the old name in place.
	 */
	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3410 
/* hci_cmd_sync callback: expire any advertising instance that includes
 * the appearance value, queued after the appearance was changed.
 */
static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}
3415 
3416 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3417 			  u16 len)
3418 {
3419 	struct mgmt_cp_set_appearance *cp = data;
3420 	u16 appearance;
3421 	int err;
3422 
3423 	bt_dev_dbg(hdev, "sock %p", sk);
3424 
3425 	if (!lmp_le_capable(hdev))
3426 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3427 				       MGMT_STATUS_NOT_SUPPORTED);
3428 
3429 	appearance = le16_to_cpu(cp->appearance);
3430 
3431 	hci_dev_lock(hdev);
3432 
3433 	if (hdev->appearance != appearance) {
3434 		hdev->appearance = appearance;
3435 
3436 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3437 			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3438 					   NULL);
3439 
3440 		ext_info_changed(hdev, sk);
3441 	}
3442 
3443 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3444 				0);
3445 
3446 	hci_dev_unlock(hdev);
3447 
3448 	return err;
3449 }
3450 
3451 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3452 				 void *data, u16 len)
3453 {
3454 	struct mgmt_rp_get_phy_configuration rp;
3455 
3456 	bt_dev_dbg(hdev, "sock %p", sk);
3457 
3458 	hci_dev_lock(hdev);
3459 
3460 	memset(&rp, 0, sizeof(rp));
3461 
3462 	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3463 	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3464 	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3465 
3466 	hci_dev_unlock(hdev);
3467 
3468 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3469 				 &rp, sizeof(rp));
3470 }
3471 
3472 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3473 {
3474 	struct mgmt_ev_phy_configuration_changed ev;
3475 
3476 	memset(&ev, 0, sizeof(ev));
3477 
3478 	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3479 
3480 	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3481 			  sizeof(ev), skip);
3482 }
3483 
/* Completion callback for the Set PHY Configuration command queued via
 * hci_cmd_sync_queue() in set_phy_configuration().
 *
 * cmd->skb holds the raw HCI command complete returned by
 * set_default_phy_sync(): it may be NULL (no response), an ERR_PTR, or
 * a valid skb whose first byte is the HCI status.  Only once all of
 * these map to success is the command complete sent and the PHY change
 * broadcast to other sockets.  The skb (if valid) and the pending
 * command are always released.
 */
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* err == 0 only means the sync work ran; the real outcome is in
	 * the skb, so derive the status from it in that case.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		/* Notify everyone else; the requester got the reply above */
		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}
3517 
3518 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
3519 {
3520 	struct mgmt_pending_cmd *cmd = data;
3521 	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
3522 	struct hci_cp_le_set_default_phy cp_phy;
3523 	u32 selected_phys = __le32_to_cpu(cp->selected_phys);
3524 
3525 	memset(&cp_phy, 0, sizeof(cp_phy));
3526 
3527 	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3528 		cp_phy.all_phys |= 0x01;
3529 
3530 	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3531 		cp_phy.all_phys |= 0x02;
3532 
3533 	if (selected_phys & MGMT_PHY_LE_1M_TX)
3534 		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3535 
3536 	if (selected_phys & MGMT_PHY_LE_2M_TX)
3537 		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3538 
3539 	if (selected_phys & MGMT_PHY_LE_CODED_TX)
3540 		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3541 
3542 	if (selected_phys & MGMT_PHY_LE_1M_RX)
3543 		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3544 
3545 	if (selected_phys & MGMT_PHY_LE_2M_RX)
3546 		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3547 
3548 	if (selected_phys & MGMT_PHY_LE_CODED_RX)
3549 		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3550 
3551 	cmd->skb =  __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
3552 				   sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
3553 
3554 	return 0;
3555 }
3556 
/* MGMT_OP_SET_PHY_CONFIGURATION handler.
 *
 * Validates the requested PHY set against the controller's supported
 * and configurable PHYs, applies the BR/EDR part immediately by
 * rewriting hdev->pkt_type, and queues set_default_phy_sync() for the
 * LE part when it differs from the current selection.
 *
 * Note the asymmetric bit semantics in pkt_type: DH/DM bits are
 * "use this packet type", while the 2DHx/3DHx (EDR) bits are
 * "do NOT use this packet type", hence the inverted set/clear logic
 * below.
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Selecting an unsupported PHY is invalid */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	/* Non-configurable PHYs cannot be deselected either */
	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Map the BR/EDR PHY selection onto ACL packet type bits */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	/* EDR bits are inverted: set means "shall not be used" */
	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If only the BR/EDR part changed, no HCI command is needed:
	 * reply directly and broadcast the change if any.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3685 
3686 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3687 			    u16 len)
3688 {
3689 	int err = MGMT_STATUS_SUCCESS;
3690 	struct mgmt_cp_set_blocked_keys *keys = data;
3691 	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3692 				   sizeof(struct mgmt_blocked_key_info));
3693 	u16 key_count, expected_len;
3694 	int i;
3695 
3696 	bt_dev_dbg(hdev, "sock %p", sk);
3697 
3698 	key_count = __le16_to_cpu(keys->key_count);
3699 	if (key_count > max_key_count) {
3700 		bt_dev_err(hdev, "too big key_count value %u", key_count);
3701 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3702 				       MGMT_STATUS_INVALID_PARAMS);
3703 	}
3704 
3705 	expected_len = struct_size(keys, keys, key_count);
3706 	if (expected_len != len) {
3707 		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3708 			   expected_len, len);
3709 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3710 				       MGMT_STATUS_INVALID_PARAMS);
3711 	}
3712 
3713 	hci_dev_lock(hdev);
3714 
3715 	hci_blocked_keys_clear(hdev);
3716 
3717 	for (i = 0; i < keys->key_count; ++i) {
3718 		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3719 
3720 		if (!b) {
3721 			err = MGMT_STATUS_NO_RESOURCES;
3722 			break;
3723 		}
3724 
3725 		b->type = keys->keys[i].type;
3726 		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3727 		list_add_rcu(&b->list, &hdev->blocked_keys);
3728 	}
3729 	hci_dev_unlock(hdev);
3730 
3731 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3732 				err, NULL, 0);
3733 }
3734 
/* MGMT_OP_SET_WIDEBAND_SPEECH handler: toggle the wideband speech
 * (mSBC over SCO/eSCO) setting.
 *
 * Only available on controllers that declare the quirk.  The setting
 * can only be flipped while powered off; while powered on, a request
 * that would change the current state is rejected (a no-op request is
 * still answered with the current settings).  On an actual change a
 * New Settings event is emitted to the other sockets.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* While powered, only a request matching the current state is
	 * accepted (and is a no-op).
	 */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3790 
/* MGMT_OP_READ_CONTROLLER_CAP handler: report controller capabilities
 * as a list of length/type-prefixed (EIR-style) entries.
 *
 * buf sizing: the reply header is 2 bytes (cap_len) and the entries
 * appended below total at most 3 + 4 + 4 + 4 = 15 bytes, so 20 bytes
 * is sufficient.  Keep buf in sync when adding entries.
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	/* 3 bytes: len + type + flags */
	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
3857 
/* UUIDs identifying experimental features.  The byte arrays are stored
 * in reversed (wire/little-endian) order relative to the canonical
 * string form given in each comment.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 simult_central_periph_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};
3889 
/* MGMT_OP_READ_EXP_FEATURES_INFO handler: list the experimental
 * features available either globally (hdev == NULL, non-controller
 * index) or for a specific controller, each as a UUID plus flags.
 *
 * Flag bit 0 means "currently enabled"; for the RPA resolution entry
 * bit 1 additionally means "changing it alters supported settings".
 * Each feature entry is 20 bytes (16-byte UUID + 32-bit flags), which
 * determines the reply length and the buf sizing below.
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	char buf[102];   /* Enough space for 5 features: 2 + 20 * 5 */
	struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
	u16 idx = 0;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* Debug logging is a global feature, only on the non-controller
	 * index.
	 */
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev) {
		if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
		    (hdev->le_states[4] & 0x08) &&	/* Central */
		    (hdev->le_states[4] & 0x40) &&	/* Peripheral */
		    (hdev->le_states[3] & 0x10))	/* Simultaneous */
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && ll_privacy_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_READ_EXP_FEATURES_INFO,
				 0, rp, sizeof(*rp) + (20 * idx));
}
3971 
3972 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
3973 					  struct sock *skip)
3974 {
3975 	struct mgmt_ev_exp_feature_changed ev;
3976 
3977 	memset(&ev, 0, sizeof(ev));
3978 	memcpy(ev.uuid, rpa_resolution_uuid, 16);
3979 	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
3980 
3981 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
3982 				  &ev, sizeof(ev),
3983 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3984 
3985 }
3986 
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Notify subscribed MGMT sockets that the global debug feature toggled.
 * Sent on the non-controller index (hdev == NULL); @skip is excluded.
 */
static int exp_debug_feature_changed(bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, debug_uuid, sizeof(debug_uuid));
	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
#endif
4001 
4002 static int exp_quality_report_feature_changed(bool enabled,
4003 					      struct hci_dev *hdev,
4004 					      struct sock *skip)
4005 {
4006 	struct mgmt_ev_exp_feature_changed ev;
4007 
4008 	memset(&ev, 0, sizeof(ev));
4009 	memcpy(ev.uuid, quality_report_uuid, 16);
4010 	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4011 
4012 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4013 				  &ev, sizeof(ev),
4014 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4015 }
4016 
/* Build one entry of the exp_features table: pairs a feature UUID with
 * the handler that processes MGMT_OP_SET_EXP_FEATURE for it.
 */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
4022 
/* The zero key uuid is special. Multiple exp features are set through it. */
/* Handles MGMT_OP_SET_EXP_FEATURE with the all-zero UUID: a "disable
 * everything" request.  Turns off each experimental feature that is
 * applicable for the given index (debug globally, LL privacy per
 * controller when powered off), emitting the corresponding changed
 * event only when a feature was actually on.  Always replies with a
 * zeroed UUID and zero flags.
 */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_debug_feature_changed(false, sk);
	}
#endif

	/* LL privacy can only be cleared while the controller is off */
	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed = hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY);

		hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);

		if (changed)
			exp_ll_privacy_feature_changed(false, hdev, sk);
	}

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
4058 
#ifdef CONFIG_BT_FEATURE_DEBUG
/* MGMT_OP_SET_EXP_FEATURE handler for the debug UUID: toggle global
 * Bluetooth debug logging.  Global feature, so it must be addressed on
 * the non-controller index; the payload is a single boolean octet.
 * Emits the feature-changed event when the state actually flipped.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_debug_feature_changed(val, sk);

	return err;
}
#endif
4105 
/* MGMT_OP_SET_EXP_FEATURE handler for the RPA resolution (LL privacy)
 * UUID.  Controller-specific, only allowed while powered off, boolean
 * payload.  Enabling LL privacy also clears the HCI_ADVERTISING flag,
 * since software-based advertising is not compatible with it.  The
 * reply flags mirror the event flags: bit 0 = enabled, bit 1 =
 * supported settings changed.
 */
static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY);
		hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY);
		hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}
4170 
/* MGMT_OP_SET_EXP_FEATURE handler for the quality report UUID.
 *
 * Controller-specific, boolean payload.  The toggle is delegated to the
 * driver's set_quality_report callback when present, otherwise to the
 * AOSP vendor extension; both paths run under the request-sync lock
 * since they may issue HCI traffic.  The HCI_QUALITY_REPORT flag is
 * only updated after the driver/vendor call succeeded.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		/* Driver callback takes precedence over the AOSP path */
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_quality_report_feature_changed(val, hdev, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4244 
4245 static int exp_offload_codec_feature_changed(bool enabled, struct hci_dev *hdev,
4246 					     struct sock *skip)
4247 {
4248 	struct mgmt_ev_exp_feature_changed ev;
4249 
4250 	memset(&ev, 0, sizeof(ev));
4251 	memcpy(ev.uuid, offload_codecs_uuid, 16);
4252 	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4253 
4254 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4255 				  &ev, sizeof(ev),
4256 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4257 }
4258 
/* MGMT_OP_SET_EXP_FEATURE handler for the codec offload UUID.
 *
 * Controller-specific, boolean payload.  Requires a driver that
 * implements get_data_path_id; the toggle itself only flips the
 * HCI_OFFLOAD_CODECS_ENABLED flag (no HCI traffic), and a changed
 * event is broadcast when the state actually flipped.
 */
static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
				  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));

	if (!hdev->get_data_path_id) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	}

	bt_dev_info(hdev, "offload codecs enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_offload_codec_feature_changed(val, hdev, sk);

	return err;
}
4316 
/* Dispatch table mapping experimental feature UUIDs to their setter
 * functions; scanned linearly by set_exp_feature() below.
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
4333 
4334 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4335 			   void *data, u16 data_len)
4336 {
4337 	struct mgmt_cp_set_exp_feature *cp = data;
4338 	size_t i = 0;
4339 
4340 	bt_dev_dbg(hdev, "sock %p", sk);
4341 
4342 	for (i = 0; exp_features[i].uuid; i++) {
4343 		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4344 			return exp_features[i].set_func(sk, hdev, cp, data_len);
4345 	}
4346 
4347 	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4348 			       MGMT_OP_SET_EXP_FEATURE,
4349 			       MGMT_STATUS_NOT_SUPPORTED);
4350 }
4351 
/* Bitmask covering every currently defined per-device flag (bits 0 to
 * HCI_CONN_FLAG_MAX - 1); used to validate and report device flags.
 */
#define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
4353 
4354 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4355 			    u16 data_len)
4356 {
4357 	struct mgmt_cp_get_device_flags *cp = data;
4358 	struct mgmt_rp_get_device_flags rp;
4359 	struct bdaddr_list_with_flags *br_params;
4360 	struct hci_conn_params *params;
4361 	u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4362 	u32 current_flags = 0;
4363 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4364 
4365 	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4366 		   &cp->addr.bdaddr, cp->addr.type);
4367 
4368 	hci_dev_lock(hdev);
4369 
4370 	memset(&rp, 0, sizeof(rp));
4371 
4372 	if (cp->addr.type == BDADDR_BREDR) {
4373 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4374 							      &cp->addr.bdaddr,
4375 							      cp->addr.type);
4376 		if (!br_params)
4377 			goto done;
4378 
4379 		current_flags = br_params->current_flags;
4380 	} else {
4381 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4382 						le_addr_type(cp->addr.type));
4383 
4384 		if (!params)
4385 			goto done;
4386 
4387 		current_flags = params->current_flags;
4388 	}
4389 
4390 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4391 	rp.addr.type = cp->addr.type;
4392 	rp.supported_flags = cpu_to_le32(supported_flags);
4393 	rp.current_flags = cpu_to_le32(current_flags);
4394 
4395 	status = MGMT_STATUS_SUCCESS;
4396 
4397 done:
4398 	hci_dev_unlock(hdev);
4399 
4400 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
4401 				&rp, sizeof(rp));
4402 }
4403 
/* Broadcast MGMT_EV_DEVICE_FLAGS_CHANGED to all mgmt sockets except @sk
 * (the socket that triggered the change), carrying the new supported
 * and current flag masks for the given device address.
 */
static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 u32 supported_flags, u32 current_flags)
{
	struct mgmt_ev_device_flags_changed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = bdaddr_type;
	ev.supported_flags = cpu_to_le32(supported_flags);
	ev.current_flags = cpu_to_le32(current_flags);

	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
}
4417 
4418 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4419 			    u16 len)
4420 {
4421 	struct mgmt_cp_set_device_flags *cp = data;
4422 	struct bdaddr_list_with_flags *br_params;
4423 	struct hci_conn_params *params;
4424 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4425 	u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4426 	u32 current_flags = __le32_to_cpu(cp->current_flags);
4427 
4428 	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4429 		   &cp->addr.bdaddr, cp->addr.type,
4430 		   __le32_to_cpu(current_flags));
4431 
4432 	if ((supported_flags | current_flags) != supported_flags) {
4433 		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4434 			    current_flags, supported_flags);
4435 		goto done;
4436 	}
4437 
4438 	hci_dev_lock(hdev);
4439 
4440 	if (cp->addr.type == BDADDR_BREDR) {
4441 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4442 							      &cp->addr.bdaddr,
4443 							      cp->addr.type);
4444 
4445 		if (br_params) {
4446 			br_params->current_flags = current_flags;
4447 			status = MGMT_STATUS_SUCCESS;
4448 		} else {
4449 			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4450 				    &cp->addr.bdaddr, cp->addr.type);
4451 		}
4452 	} else {
4453 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4454 						le_addr_type(cp->addr.type));
4455 		if (params) {
4456 			params->current_flags = current_flags;
4457 			status = MGMT_STATUS_SUCCESS;
4458 		} else {
4459 			bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4460 				    &cp->addr.bdaddr,
4461 				    le_addr_type(cp->addr.type));
4462 		}
4463 	}
4464 
4465 done:
4466 	hci_dev_unlock(hdev);
4467 
4468 	if (status == MGMT_STATUS_SUCCESS)
4469 		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4470 				     supported_flags, current_flags);
4471 
4472 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4473 				 &cp->addr, sizeof(cp->addr));
4474 }
4475 
4476 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4477 				   u16 handle)
4478 {
4479 	struct mgmt_ev_adv_monitor_added ev;
4480 
4481 	ev.monitor_handle = cpu_to_le16(handle);
4482 
4483 	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
4484 }
4485 
/* Notify mgmt listeners that the advertisement monitor identified by
 * @handle was removed from @hdev.
 *
 * If a MGMT_OP_REMOVE_ADV_MONITOR command is pending for a specific
 * (non-zero) handle, the issuing socket is skipped: it learns the
 * outcome from its own command response instead of this event.
 */
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk_skip = NULL;
	struct mgmt_cp_remove_adv_monitor *cp;

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cmd) {
		cp = cmd->param;

		/* A zero handle means "remove all"; only skip the issuer
		 * when it asked for one specific monitor.
		 */
		if (cp->monitor_handle)
			sk_skip = cmd->sk;
	}

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
}
4505 
4506 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
4507 				 void *data, u16 len)
4508 {
4509 	struct adv_monitor *monitor = NULL;
4510 	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
4511 	int handle, err;
4512 	size_t rp_size = 0;
4513 	__u32 supported = 0;
4514 	__u32 enabled = 0;
4515 	__u16 num_handles = 0;
4516 	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
4517 
4518 	BT_DBG("request for %s", hdev->name);
4519 
4520 	hci_dev_lock(hdev);
4521 
4522 	if (msft_monitor_supported(hdev))
4523 		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
4524 
4525 	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
4526 		handles[num_handles++] = monitor->handle;
4527 
4528 	hci_dev_unlock(hdev);
4529 
4530 	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
4531 	rp = kmalloc(rp_size, GFP_KERNEL);
4532 	if (!rp)
4533 		return -ENOMEM;
4534 
4535 	/* All supported features are currently enabled */
4536 	enabled = supported;
4537 
4538 	rp->supported_features = cpu_to_le32(supported);
4539 	rp->enabled_features = cpu_to_le32(enabled);
4540 	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
4541 	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
4542 	rp->num_handles = cpu_to_le16(num_handles);
4543 	if (num_handles)
4544 		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
4545 
4546 	err = mgmt_cmd_complete(sk, hdev->id,
4547 				MGMT_OP_READ_ADV_MONITOR_FEATURES,
4548 				MGMT_STATUS_SUCCESS, rp, rp_size);
4549 
4550 	kfree(rp);
4551 
4552 	return err;
4553 }
4554 
4555 int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
4556 {
4557 	struct mgmt_rp_add_adv_patterns_monitor rp;
4558 	struct mgmt_pending_cmd *cmd;
4559 	struct adv_monitor *monitor;
4560 	int err = 0;
4561 
4562 	hci_dev_lock(hdev);
4563 
4564 	cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev);
4565 	if (!cmd) {
4566 		cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev);
4567 		if (!cmd)
4568 			goto done;
4569 	}
4570 
4571 	monitor = cmd->user_data;
4572 	rp.monitor_handle = cpu_to_le16(monitor->handle);
4573 
4574 	if (!status) {
4575 		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
4576 		hdev->adv_monitors_cnt++;
4577 		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
4578 			monitor->state = ADV_MONITOR_STATE_REGISTERED;
4579 		hci_update_passive_scan(hdev);
4580 	}
4581 
4582 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4583 				mgmt_status(status), &rp, sizeof(rp));
4584 	mgmt_pending_remove(cmd);
4585 
4586 done:
4587 	hci_dev_unlock(hdev);
4588 	bt_dev_dbg(hdev, "add monitor %d complete, status %u",
4589 		   rp.monitor_handle, status);
4590 
4591 	return err;
4592 }
4593 
/* Common tail for the ADD_ADV_PATTERNS_MONITOR(_RSSI) handlers: takes
 * ownership of monitor @m and either queues it with the controller or
 * reports the failure @status for opcode @op back to the caller.
 *
 * On every error path the monitor is released via hci_free_adv_monitor().
 * If registration completes synchronously the command is answered
 * immediately; otherwise the pending command is resolved later in
 * mgmt_add_adv_patterns_monitor_complete().
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd;
	int err;
	bool pending;

	hci_dev_lock(hdev);

	/* Caller already failed (parse error / OOM); just report it */
	if (status)
		goto unlock;

	/* Serialize against other monitor operations and LE toggling */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	/* pending == true means the controller round-trip is still running */
	pending = hci_add_adv_monitor(hdev, m, &err);
	if (err) {
		if (err == -ENOSPC || err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else if (err == -EINVAL)
			status = MGMT_STATUS_INVALID_PARAMS;
		else
			status = MGMT_STATUS_FAILED;

		mgmt_pending_remove(cmd);
		goto unlock;
	}

	if (!pending) {
		/* Registered synchronously; answer the command right away */
		mgmt_pending_remove(cmd);
		rp.monitor_handle = cpu_to_le16(m->handle);
		mgmt_adv_monitor_added(sk, hdev, m->handle);
		m->state = ADV_MONITOR_STATE_REGISTERED;
		hdev->adv_monitors_cnt++;

		hci_dev_unlock(hdev);
		return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS,
					 &rp, sizeof(rp));
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	/* NOTE(review): m may be NULL when the caller's allocation failed;
	 * presumably hci_free_adv_monitor() is NULL-safe - confirm.
	 */
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
4657 
4658 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4659 				   struct mgmt_adv_rssi_thresholds *rssi)
4660 {
4661 	if (rssi) {
4662 		m->rssi.low_threshold = rssi->low_threshold;
4663 		m->rssi.low_threshold_timeout =
4664 		    __le16_to_cpu(rssi->low_threshold_timeout);
4665 		m->rssi.high_threshold = rssi->high_threshold;
4666 		m->rssi.high_threshold_timeout =
4667 		    __le16_to_cpu(rssi->high_threshold_timeout);
4668 		m->rssi.sampling_period = rssi->sampling_period;
4669 	} else {
4670 		/* Default values. These numbers are the least constricting
4671 		 * parameters for MSFT API to work, so it behaves as if there
4672 		 * are no rssi parameter to consider. May need to be changed
4673 		 * if other API are to be supported.
4674 		 */
4675 		m->rssi.low_threshold = -127;
4676 		m->rssi.low_threshold_timeout = 60;
4677 		m->rssi.high_threshold = -127;
4678 		m->rssi.high_threshold_timeout = 0;
4679 		m->rssi.sampling_period = 0;
4680 	}
4681 }
4682 
4683 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
4684 				    struct mgmt_adv_pattern *patterns)
4685 {
4686 	u8 offset = 0, length = 0;
4687 	struct adv_pattern *p = NULL;
4688 	int i;
4689 
4690 	for (i = 0; i < pattern_count; i++) {
4691 		offset = patterns[i].offset;
4692 		length = patterns[i].length;
4693 		if (offset >= HCI_MAX_AD_LENGTH ||
4694 		    length > HCI_MAX_AD_LENGTH ||
4695 		    (offset + length) > HCI_MAX_AD_LENGTH)
4696 			return MGMT_STATUS_INVALID_PARAMS;
4697 
4698 		p = kmalloc(sizeof(*p), GFP_KERNEL);
4699 		if (!p)
4700 			return MGMT_STATUS_NO_RESOURCES;
4701 
4702 		p->ad_type = patterns[i].ad_type;
4703 		p->offset = patterns[i].offset;
4704 		p->length = patterns[i].length;
4705 		memcpy(p->value, patterns[i].value, p->length);
4706 
4707 		INIT_LIST_HEAD(&p->list);
4708 		list_add(&p->list, &m->patterns);
4709 	}
4710 
4711 	return MGMT_STATUS_SUCCESS;
4712 }
4713 
4714 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4715 				    void *data, u16 len)
4716 {
4717 	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4718 	struct adv_monitor *m = NULL;
4719 	u8 status = MGMT_STATUS_SUCCESS;
4720 	size_t expected_size = sizeof(*cp);
4721 
4722 	BT_DBG("request for %s", hdev->name);
4723 
4724 	if (len <= sizeof(*cp)) {
4725 		status = MGMT_STATUS_INVALID_PARAMS;
4726 		goto done;
4727 	}
4728 
4729 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4730 	if (len != expected_size) {
4731 		status = MGMT_STATUS_INVALID_PARAMS;
4732 		goto done;
4733 	}
4734 
4735 	m = kzalloc(sizeof(*m), GFP_KERNEL);
4736 	if (!m) {
4737 		status = MGMT_STATUS_NO_RESOURCES;
4738 		goto done;
4739 	}
4740 
4741 	INIT_LIST_HEAD(&m->patterns);
4742 
4743 	parse_adv_monitor_rssi(m, NULL);
4744 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4745 
4746 done:
4747 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4748 					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
4749 }
4750 
4751 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
4752 					 void *data, u16 len)
4753 {
4754 	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
4755 	struct adv_monitor *m = NULL;
4756 	u8 status = MGMT_STATUS_SUCCESS;
4757 	size_t expected_size = sizeof(*cp);
4758 
4759 	BT_DBG("request for %s", hdev->name);
4760 
4761 	if (len <= sizeof(*cp)) {
4762 		status = MGMT_STATUS_INVALID_PARAMS;
4763 		goto done;
4764 	}
4765 
4766 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4767 	if (len != expected_size) {
4768 		status = MGMT_STATUS_INVALID_PARAMS;
4769 		goto done;
4770 	}
4771 
4772 	m = kzalloc(sizeof(*m), GFP_KERNEL);
4773 	if (!m) {
4774 		status = MGMT_STATUS_NO_RESOURCES;
4775 		goto done;
4776 	}
4777 
4778 	INIT_LIST_HEAD(&m->patterns);
4779 
4780 	parse_adv_monitor_rssi(m, &cp->rssi);
4781 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4782 
4783 done:
4784 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4785 					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
4786 }
4787 
4788 int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
4789 {
4790 	struct mgmt_rp_remove_adv_monitor rp;
4791 	struct mgmt_cp_remove_adv_monitor *cp;
4792 	struct mgmt_pending_cmd *cmd;
4793 	int err = 0;
4794 
4795 	hci_dev_lock(hdev);
4796 
4797 	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4798 	if (!cmd)
4799 		goto done;
4800 
4801 	cp = cmd->param;
4802 	rp.monitor_handle = cp->monitor_handle;
4803 
4804 	if (!status)
4805 		hci_update_passive_scan(hdev);
4806 
4807 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4808 				mgmt_status(status), &rp, sizeof(rp));
4809 	mgmt_pending_remove(cmd);
4810 
4811 done:
4812 	hci_dev_unlock(hdev);
4813 	bt_dev_dbg(hdev, "remove monitor %d complete, status %u",
4814 		   rp.monitor_handle, status);
4815 
4816 	return err;
4817 }
4818 
/* Handle MGMT_OP_REMOVE_ADV_MONITOR: remove one monitor (non-zero
 * handle) or all monitors (handle 0).  If the controller round-trip is
 * still pending, the command is answered later in
 * mgmt_remove_adv_monitor_complete().
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_remove_adv_monitor *cp = data;
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd;
	u16 handle = __le16_to_cpu(cp->monitor_handle);
	int err, status;
	bool pending;

	BT_DBG("request for %s", hdev->name);
	rp.monitor_handle = cp->monitor_handle;

	hci_dev_lock(hdev);

	/* Serialize against other monitor operations and LE toggling */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	/* Handle 0 is a wildcard meaning "remove all monitors" */
	if (handle)
		pending = hci_remove_single_adv_monitor(hdev, handle, &err);
	else
		pending = hci_remove_all_adv_monitor(hdev, &err);

	if (err) {
		mgmt_pending_remove(cmd);

		/* NOTE(review): -ENOENT (unknown handle) maps to
		 * MGMT_STATUS_INVALID_INDEX rather than INVALID_PARAMS -
		 * looks intentional, but worth confirming.
		 */
		if (err == -ENOENT)
			status = MGMT_STATUS_INVALID_INDEX;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	/* monitor can be removed without forwarding request to controller */
	if (!pending) {
		mgmt_pending_remove(cmd);
		hci_dev_unlock(hdev);

		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_ADV_MONITOR,
					 MGMT_STATUS_SUCCESS,
					 &rp, sizeof(rp));
	}

	hci_dev_unlock(hdev);
	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
4883 
/* hci_cmd_sync completion for READ_LOCAL_OOB_DATA: translate the raw
 * HCI response skb (legacy or extended/Secure Connections format) into
 * the mgmt reply and free the pending command.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Even if the sync request itself succeeded, the skb may carry an
	 * error: missing, an ERR_PTR, or a non-zero HCI status byte.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		/* Legacy response: P-192 hash/rand only.  The reply is
		 * shrunk below so the unused P-256 fields are not sent.
		 */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended response: both P-192 and P-256 values */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	/* Only a real skb may be freed; ERR_PTR values must not be */
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
4950 
4951 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
4952 {
4953 	struct mgmt_pending_cmd *cmd = data;
4954 
4955 	if (bredr_sc_enabled(hdev))
4956 		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
4957 	else
4958 		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
4959 
4960 	if (IS_ERR(cmd->skb))
4961 		return PTR_ERR(cmd->skb);
4962 	else
4963 		return 0;
4964 }
4965 
/* Handle MGMT_OP_READ_LOCAL_OOB_DATA: queue an HCI read of the local
 * out-of-band pairing data.  The reply is produced asynchronously in
 * read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB data requires Secure Simple Pairing support */
	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one read may be in flight at a time */
	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* mgmt_pending_new: cmd is owned by the completion callback, not
	 * placed on the pending list.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5013 
/* Handle MGMT_OP_ADD_REMOTE_OOB_DATA: store out-of-band pairing data
 * for a remote device.  Two payload sizes are accepted: the legacy form
 * carrying P-192 hash/rand only, and the extended form that additionally
 * carries P-256 values.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy form: P-192 values, BR/EDR addresses only */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended form: P-192 plus P-256 values */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5121 
5122 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5123 				  void *data, u16 len)
5124 {
5125 	struct mgmt_cp_remove_remote_oob_data *cp = data;
5126 	u8 status;
5127 	int err;
5128 
5129 	bt_dev_dbg(hdev, "sock %p", sk);
5130 
5131 	if (cp->addr.type != BDADDR_BREDR)
5132 		return mgmt_cmd_complete(sk, hdev->id,
5133 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5134 					 MGMT_STATUS_INVALID_PARAMS,
5135 					 &cp->addr, sizeof(cp->addr));
5136 
5137 	hci_dev_lock(hdev);
5138 
5139 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5140 		hci_remote_oob_data_clear(hdev);
5141 		status = MGMT_STATUS_SUCCESS;
5142 		goto done;
5143 	}
5144 
5145 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5146 	if (err < 0)
5147 		status = MGMT_STATUS_INVALID_PARAMS;
5148 	else
5149 		status = MGMT_STATUS_SUCCESS;
5150 
5151 done:
5152 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5153 				status, &cp->addr, sizeof(cp->addr));
5154 
5155 	hci_dev_unlock(hdev);
5156 	return err;
5157 }
5158 
5159 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5160 {
5161 	struct mgmt_pending_cmd *cmd;
5162 
5163 	bt_dev_dbg(hdev, "status %u", status);
5164 
5165 	hci_dev_lock(hdev);
5166 
5167 	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5168 	if (!cmd)
5169 		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5170 
5171 	if (!cmd)
5172 		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5173 
5174 	if (cmd) {
5175 		cmd->cmd_complete(cmd, mgmt_status(status));
5176 		mgmt_pending_remove(cmd);
5177 	}
5178 
5179 	hci_dev_unlock(hdev);
5180 }
5181 
5182 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5183 				    uint8_t *mgmt_status)
5184 {
5185 	switch (type) {
5186 	case DISCOV_TYPE_LE:
5187 		*mgmt_status = mgmt_le_support(hdev);
5188 		if (*mgmt_status)
5189 			return false;
5190 		break;
5191 	case DISCOV_TYPE_INTERLEAVED:
5192 		*mgmt_status = mgmt_le_support(hdev);
5193 		if (*mgmt_status)
5194 			return false;
5195 		fallthrough;
5196 	case DISCOV_TYPE_BREDR:
5197 		*mgmt_status = mgmt_bredr_support(hdev);
5198 		if (*mgmt_status)
5199 			return false;
5200 		break;
5201 	default:
5202 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5203 		return false;
5204 	}
5205 
5206 	return true;
5207 }
5208 
5209 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5210 {
5211 	struct mgmt_pending_cmd *cmd = data;
5212 
5213 	bt_dev_dbg(hdev, "err %d", err);
5214 
5215 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5216 			  cmd->param, 1);
5217 	mgmt_pending_free(cmd);
5218 
5219 	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
5220 				DISCOVERY_FINDING);
5221 }
5222 
/* hci_cmd_sync callback: kick off the actual HCI discovery sequence.
 * @data (the pending command) is unused here; completion handling is
 * done in start_discovery_complete().
 */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}
5227 
/* Common implementation behind MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY (@op selects which): validate the
 * controller and discovery state, then queue the HCI discovery
 * sequence.  The command is completed asynchronously in
 * start_discovery_complete().
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject while another discovery or a periodic inquiry is active */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	/* mgmt_pending_new: cmd is owned by the completion callback, not
	 * placed on the pending list.
	 */
	cmd = mgmt_pending_new(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5298 
/* Handle MGMT_OP_START_DISCOVERY (regular, non-limited discovery). */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
5305 
/* Handle MGMT_OP_START_LIMITED_DISCOVERY (limited discovery mode). */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
5313 
/* Handle the MGMT Start Service Discovery command.
 *
 * Validates that the controller is powered, that no discovery is
 * running or paused, that the UUID count and parameter length are
 * consistent and that the discovery type is valid.  On success it
 * installs an RSSI/UUID result filter in hdev->discovery and queues
 * the actual discovery start on the hci_sync command queue; the final
 * reply to user space is sent by the queued start_discovery_complete
 * callback.  Returns 0 or a negative errno.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Largest uuid_count for which sizeof(*cp) + uuid_count * 16
	 * still fits in the u16 parameter length.
	 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery operation may be active at a time */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* Each entry of the trailing UUID list is 16 bytes (128 bit) */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5425 
/* Notify the management interface that a Stop Discovery operation
 * finished with the given HCI status.  Completes and removes any
 * pending MGMT_OP_STOP_DISCOVERY command, converting the HCI status
 * into a mgmt status for the reply.
 */
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
5442 
/* hci_cmd_sync completion callback for stop_discovery(): send the
 * command result to user space and, on success, mark discovery as
 * stopped.
 */
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Echo back the first parameter byte (the discovery type) */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_free(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
5456 
/* hci_cmd_sync work function: forward to the synchronous helper that
 * actually stops discovery; the opaque data argument is unused here.
 */
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	int err;

	err = hci_stop_discovery_sync(hdev);

	return err;
}
5461 
/* Handle the MGMT Stop Discovery command.
 *
 * Rejects the request when no discovery is active or when the given
 * discovery type does not match the one currently running; otherwise
 * queues the stop on the hci_sync command queue.  The final reply to
 * user space is sent by stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The type being stopped must match the type that was started */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5506 
/* Handle the MGMT Confirm Name command.
 *
 * Looks up the address in the inquiry cache entries still awaiting
 * name resolution.  If user space confirms the name as known, the
 * entry is removed from the resolve list; otherwise it is marked as
 * needing resolution and re-queued.  Replies synchronously with the
 * address.  Fails unless discovery is active.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
5548 
5549 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
5550 			u16 len)
5551 {
5552 	struct mgmt_cp_block_device *cp = data;
5553 	u8 status;
5554 	int err;
5555 
5556 	bt_dev_dbg(hdev, "sock %p", sk);
5557 
5558 	if (!bdaddr_type_is_valid(cp->addr.type))
5559 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
5560 					 MGMT_STATUS_INVALID_PARAMS,
5561 					 &cp->addr, sizeof(cp->addr));
5562 
5563 	hci_dev_lock(hdev);
5564 
5565 	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
5566 				  cp->addr.type);
5567 	if (err < 0) {
5568 		status = MGMT_STATUS_FAILED;
5569 		goto done;
5570 	}
5571 
5572 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5573 		   sk);
5574 	status = MGMT_STATUS_SUCCESS;
5575 
5576 done:
5577 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
5578 				&cp->addr, sizeof(cp->addr));
5579 
5580 	hci_dev_unlock(hdev);
5581 
5582 	return err;
5583 }
5584 
5585 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5586 			  u16 len)
5587 {
5588 	struct mgmt_cp_unblock_device *cp = data;
5589 	u8 status;
5590 	int err;
5591 
5592 	bt_dev_dbg(hdev, "sock %p", sk);
5593 
5594 	if (!bdaddr_type_is_valid(cp->addr.type))
5595 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5596 					 MGMT_STATUS_INVALID_PARAMS,
5597 					 &cp->addr, sizeof(cp->addr));
5598 
5599 	hci_dev_lock(hdev);
5600 
5601 	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
5602 				  cp->addr.type);
5603 	if (err < 0) {
5604 		status = MGMT_STATUS_INVALID_PARAMS;
5605 		goto done;
5606 	}
5607 
5608 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5609 		   sk);
5610 	status = MGMT_STATUS_SUCCESS;
5611 
5612 done:
5613 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5614 				&cp->addr, sizeof(cp->addr));
5615 
5616 	hci_dev_unlock(hdev);
5617 
5618 	return err;
5619 }
5620 
/* hci_cmd_sync work function for set_device_id(): regenerate the EIR
 * data so it reflects the new Device ID; data is unused.
 */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	int err;

	err = hci_update_eir_sync(hdev);

	return err;
}
5625 
/* Handle the MGMT Set Device ID command.
 *
 * Stores the Device ID record (source, vendor, product, version) in
 * hdev and schedules an EIR update so the new values are advertised.
 * Only source values 0x0000-0x0002 are accepted.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	int err;
	__u16 source;

	bt_dev_dbg(hdev, "sock %p", sk);

	source = __le16_to_cpu(cp->source);

	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	/* Best-effort EIR refresh; the command has already succeeded */
	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);

	hci_dev_unlock(hdev);

	return err;
}
5657 
/* Log the outcome of re-enabling instance advertising; there is no
 * caller to propagate the error to, so it is only reported.
 */
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (!err) {
		bt_dev_dbg(hdev, "status %d", err);
		return;
	}

	bt_dev_err(hdev, "failed to re-configure advertising %d", err);
}
5665 
/* hci_cmd_sync completion callback for set_advertising(): mirror the
 * controller advertising state in the HCI_ADVERTISING flag, answer all
 * pending Set Advertising commands and emit New Settings.  If the
 * setting was just disabled while advertising instances exist,
 * re-enable multi-instance advertising.
 */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		/* On failure just answer all pending commands with the
		 * error status; no settings have changed.
		 */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING)
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	/* Fall back to the first instance if none is current */
	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}
5713 
/* hci_cmd_sync work function for set_advertising(): program the
 * controller according to the requested mode (0x00 = off, 0x01 = on,
 * 0x02 = on and connectable).
 */
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	/* Mode 0x02 additionally requests connectable advertising */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}
5747 
/* Handle the MGMT Set Advertising command.
 *
 * val may be 0x00 (off), 0x01 (on) or 0x02 (on and connectable).  When
 * no HCI traffic is required (powered off, no effective change, LE
 * connections exist, or an active LE scan is running) only the flags
 * are toggled and the reply is sent directly; otherwise the change is
 * queued on the hci_sync command queue and completed from
 * set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* While advertising is paused (e.g. by another subsystem) the
	 * setting must not be changed.
	 */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Refuse when a conflicting operation is already in flight */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5831 
/* Handle the MGMT Set Static Address command.
 *
 * Only allowed while the controller is powered off and LE capable.
 * The address must either be BDADDR_ANY (disabling the static
 * address) or a valid static random address: not BDADDR_NONE and with
 * the two most significant bits set.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5875 
/* Handle the MGMT Set Scan Parameters command.
 *
 * Stores the LE scan interval and window.  Both values must lie in
 * the range 0x0004-0x4000 and the window must not exceed the
 * interval.  A running background scan is restarted so the new
 * parameters take effect.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED)
		hci_update_passive_scan(hdev);

	hci_dev_unlock(hdev);

	return err;
}
5924 
/* hci_cmd_sync completion callback for set_fast_connectable(): on
 * success update HCI_FAST_CONNECTABLE to the requested value and
 * confirm the settings; on failure report the error status.
 */
static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(err));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}
5948 
/* hci_cmd_sync work function for set_fast_connectable(): write the
 * requested fast-connectable page scan setting to the controller.
 */
static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	return hci_write_fast_connectable_sync(hdev, cp->val);
}
5956 
/* Handle the MGMT Set Fast Connectable command.
 *
 * Requires BR/EDR to be enabled and a controller of Bluetooth 1.2 or
 * later.  If the setting already matches, or the controller is
 * powered off, only the flag is updated and the reply sent directly;
 * otherwise the page scan change is queued on the hci_sync command
 * queue and completed from fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: confirm current settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* Powered off: just toggle the flag, no HCI traffic needed */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6012 
/* hci_cmd_sync completion callback for set_bredr(): on failure roll
 * back the optimistically set HCI_BREDR_ENABLED flag and report the
 * error; on success confirm the new settings.
 */
static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}
6035 
6036 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6037 {
6038 	int status;
6039 
6040 	status = hci_write_fast_connectable_sync(hdev, false);
6041 
6042 	if (!status)
6043 		status = hci_update_scan_sync(hdev);
6044 
6045 	/* Since only the advertising data flags will change, there
6046 	 * is no need to update the scan response data.
6047 	 */
6048 	if (!status)
6049 		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6050 
6051 	return status;
6052 }
6053 
/* Handle the MGMT Set BR/EDR command.
 *
 * Toggles BR/EDR support on a dual-mode (BR/EDR + LE) controller.
 * While powered off only the flags are updated (disabling also clears
 * the BR/EDR-dependent settings).  While powered on, disabling is
 * rejected, and re-enabling is rejected when a static address is
 * configured or Secure Connections is enabled.  On success the
 * HCI_BREDR_ENABLED flag is set before the queued work runs so the
 * advertising data flags get generated correctly.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: confirm current settings */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all settings that
		 * depend on it.
		 */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6154 
/* hci_cmd_sync completion callback for set_secure_conn(): translate
 * the requested mode (0x00 off, 0x01 enabled, 0x02 SC-only) into the
 * HCI_SC_ENABLED/HCI_SC_ONLY flags and confirm the settings, or
 * report the error status on failure.
 */
static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
}
6192 
/* hci_cmd_sync work function for set_secure_conn(): write the Secure
 * Connections host support setting to the controller.
 */
static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	/* Force write of val */
	hci_dev_set_flag(hdev, HCI_SC_ENABLED);

	return hci_write_sc_support_sync(hdev, val);
}
6204 
/* Handle the MGMT Set Secure Connections command.
 *
 * val may be 0x00 (off), 0x01 (enabled) or 0x02 (SC-only mode).  When
 * the controller is powered off, lacks SC support or has BR/EDR
 * disabled, only the flags are toggled and the reply sent directly;
 * otherwise the write is queued on the hci_sync command queue and
 * completed from set_secure_conn_complete().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On an SC-capable BR/EDR controller SSP must be enabled first */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Flag-only change: no HCI traffic needed */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* No change requested: confirm current settings */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6285 
/* Handle the MGMT Set Debug Keys command.
 *
 * val may be 0x00 (discard debug keys), 0x01 (keep stored debug keys)
 * or 0x02 (also use SSP debug mode).  When the use of debug keys
 * changes on a powered controller with SSP enabled, the SSP debug
 * mode is updated on the controller via HCI.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Only mode 0x02 turns on active use of debug keys */
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6332 
/* Handle the MGMT Set Privacy command.
 *
 * privacy may be 0x00 (off), 0x01 (on) or 0x02 (limited privacy).
 * Only allowed while the controller is powered off.  Enabling privacy
 * stores the supplied IRK and marks the RPA as expired so a fresh one
 * is generated; disabling clears the IRK and the related flags.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6389 
6390 static bool irk_is_valid(struct mgmt_irk_info *irk)
6391 {
6392 	switch (irk->addr.type) {
6393 	case BDADDR_LE_PUBLIC:
6394 		return true;
6395 
6396 	case BDADDR_LE_RANDOM:
6397 		/* Two most significant bits shall be set */
6398 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6399 			return false;
6400 		return true;
6401 	}
6402 
6403 	return false;
6404 }
6405 
/* Handler for MGMT_OP_LOAD_IRKS: replace the kernel's list of LE Identity
 * Resolving Keys with the one supplied by user space. Validation is done
 * before any state is modified so a malformed request leaves the existing
 * keys untouched.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Largest irk_count for which the full command still fits in the
	 * u16 length field (guards the struct_size() computation below).
	 */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the advertised key count exactly */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate every entry up front so the existing IRK list is only
	 * cleared once the whole request is known to be well-formed.
	 */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		/* Administratively blocked keys are skipped rather than
		 * failing the entire load.
		 */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	/* User space loading IRKs implies it can handle RPA resolution */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6476 
6477 static bool ltk_is_valid(struct mgmt_ltk_info *key)
6478 {
6479 	if (key->initiator != 0x00 && key->initiator != 0x01)
6480 		return false;
6481 
6482 	switch (key->addr.type) {
6483 	case BDADDR_LE_PUBLIC:
6484 		return true;
6485 
6486 	case BDADDR_LE_RANDOM:
6487 		/* Two most significant bits shall be set */
6488 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6489 			return false;
6490 		return true;
6491 	}
6492 
6493 	return false;
6494 }
6495 
/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS: replace the kernel's SMP LTK
 * store with the list supplied by user space. The request is fully
 * validated before the existing keys are cleared.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Largest key_count for which the full command still fits in the
	 * u16 length field (guards the struct_size() computation below).
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the advertised key count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Validate all entries before clearing the current LTK list */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		/* Administratively blocked keys are skipped rather than
		 * failing the entire load.
		 */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Map the mgmt key type to the internal SMP key type and
		 * authentication level.
		 */
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* Debug keys fall through into the default case and
			 * are therefore never stored, same as unknown types.
			 */
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6591 
/* Completion callback for get_conn_info_sync(): send the (possibly
 * refreshed) RSSI/TX power values back to user space and release the
 * connection references taken by get_conn_info().
 */
static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct mgmt_rp_get_conn_info rp;
	u8 status;

	bt_dev_dbg(hdev, "err %d", err);

	/* bdaddr is the first member of mgmt_addr_info, so this copy covers
	 * the whole address + type pair of the request.
	 */
	memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr));

	status = mgmt_status(err);
	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		/* Report sentinel values rather than stale cached data */
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
			  &rp, sizeof(rp));

	/* conn may already have been released by get_conn_info_sync() when
	 * the link went away, in which case user_data was set to NULL.
	 */
	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	mgmt_pending_free(cmd);
}
6625 
/* hci_sync work for MGMT_OP_GET_CONN_INFO: refresh RSSI and TX power for
 * the connection referenced by the pending command. Runs from the command
 * sync context, so the blocking *_sync HCI helpers may be used.
 */
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16   handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn != cmd->user_data || conn->state != BT_CONNECTED) {
		/* Release the references taken by get_conn_info() here so
		 * the completion handler does not drop them a second time.
		 */
		if (cmd->user_data) {
			hci_conn_drop(cmd->user_data);
			hci_conn_put(cmd->user_data);
			cmd->user_data = NULL;
		}
		/* NOTE(review): this returns a positive MGMT status while
		 * other paths return 0/negative errno; the completion
		 * handler maps it via mgmt_status() — confirm intended.
		 */
		return MGMT_STATUS_NOT_CONNECTED;
	}

	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}
6668 
/* Handler for MGMT_OP_GET_CONN_INFO: report RSSI and TX power for an
 * active connection, refreshing the cached values via the controller
 * when they are older than a randomized expiry interval.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Echo the requested address back in every reply, success or not */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd)
			err = -ENOMEM;
		else
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		/* Keep the connection alive until get_conn_info_complete()
		 * (or get_conn_info_sync()) drops these references.
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6762 
/* Completion callback for get_clock_info_sync(): report local (and, when
 * a connection was given, piconet) clock values back to user space and
 * release the connection references taken by get_clock_info().
 */
static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct mgmt_rp_get_clock_info rp;
	struct hci_conn *conn = cmd->user_data;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Echo the requested address; clock fields stay zero on failure */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (err)
		goto complete;

	rp.local_clock = cpu_to_le32(hdev->clock);

	/* conn is only set when a piconet clock was requested; drop the
	 * references taken by get_clock_info().
	 */
	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

complete:
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
			  sizeof(rp));

	mgmt_pending_free(cmd);
}
6795 
/* hci_sync work for MGMT_OP_GET_CLOCK_INFO: read the local clock and,
 * if the target connection still exists, its piconet clock as well.
 */
static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	/* handle == 0 / which == 0 reads the local clock */
	memset(&hci_cp, 0, sizeof(hci_cp));
	err = hci_read_clock_sync(hdev, &hci_cp);

	if (conn) {
		/* Make sure connection still exists */
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);

		if (conn && conn == cmd->user_data &&
		    conn->state == BT_CONNECTED) {
			hci_cp.handle = cpu_to_le16(conn->handle);
			hci_cp.which = 0x01; /* Piconet clock */
			err = hci_read_clock_sync(hdev, &hci_cp);
		} else if (cmd->user_data) {
			/* Connection went away: release the references taken
			 * by get_clock_info() so the completion handler does
			 * not drop them a second time.
			 */
			hci_conn_drop(cmd->user_data);
			hci_conn_put(cmd->user_data);
			cmd->user_data = NULL;
		}
	}

	return err;
}
6826 
/* Handler for MGMT_OP_GET_CLOCK_INFO: queue a sync request to read the
 * local clock and, when a BR/EDR peer address is given, that link's
 * piconet clock.
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
								u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Echo the requested address back in every reply */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is only defined for BR/EDR */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* BDADDR_ANY means "local clock only"; any other address must
	 * refer to an existing connection.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);

	} else if (conn) {
		/* Keep the connection alive until the completion handler
		 * (or get_clock_info_sync) drops these references.
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
	}


unlock:
	hci_dev_unlock(hdev);
	return err;
}
6894 
6895 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6896 {
6897 	struct hci_conn *conn;
6898 
6899 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6900 	if (!conn)
6901 		return false;
6902 
6903 	if (conn->dst_type != type)
6904 		return false;
6905 
6906 	if (conn->state != BT_CONNECTED)
6907 		return false;
6908 
6909 	return true;
6910 }
6911 
/* This function requires the caller holds hdev->lock */
/* Set the auto-connect policy for an LE device, creating connection
 * parameters if none exist yet, and re-file the entry on the matching
 * pending-action list (pend_le_conns / pend_le_reports).
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	/* Looks up existing params or allocates a fresh entry */
	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever action list the entry is currently on
	 * before re-filing it below.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* An in-flight explicit connect takes precedence over
		 * passive reporting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connection attempt if not already connected */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
6956 
6957 static void device_added(struct sock *sk, struct hci_dev *hdev,
6958 			 bdaddr_t *bdaddr, u8 type, u8 action)
6959 {
6960 	struct mgmt_ev_device_added ev;
6961 
6962 	bacpy(&ev.addr.bdaddr, bdaddr);
6963 	ev.addr.type = type;
6964 	ev.action = action;
6965 
6966 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
6967 }
6968 
/* hci_sync callback for add_device(): re-program passive scanning so the
 * newly added device takes effect.
 */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
6973 
/* Handler for MGMT_OP_ADD_DEVICE: add a BR/EDR device to the accept list
 * or configure auto-connect behavior for an LE device, then notify
 * listeners via Device Added / Device Flags Changed events.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* BDADDR_ANY is not a valid device to add */
	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	/* action: 0x00 = background scan, 0x01 = allow incoming/direct
	 * connection, 0x02 = auto-connect.
	 */
	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		/* Fetch the flags to report in the Device Flags Changed
		 * event below.
		 */
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->current_flags;
	}

	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     SUPPORTED_DEVICE_FLAGS(), current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7073 
7074 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7075 			   bdaddr_t *bdaddr, u8 type)
7076 {
7077 	struct mgmt_ev_device_removed ev;
7078 
7079 	bacpy(&ev.addr.bdaddr, bdaddr);
7080 	ev.addr.type = type;
7081 
7082 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7083 }
7084 
/* hci_sync callback for remove_device(): re-program passive scanning so
 * the removal takes effect.
 */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7089 
/* Handler for MGMT_OP_REMOVE_DEVICE: remove a single device from the
 * BR/EDR accept list or the LE connection parameters, or — when the
 * address is BDADDR_ANY with type 0 — remove all of them at once.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Single-device removal */
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries not added via Add Device (disabled or explicit
		 * connect) cannot be removed through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* BDADDR_ANY: remove everything */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* Wildcard removal requires address type 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries for in-flight explicit connects but
			 * downgrade them so they are not auto-connected.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
7217 
/* Handler for MGMT_OP_LOAD_CONN_PARAM: replace stored LE connection
 * parameters with the list supplied by user space. Invalid entries are
 * skipped with an error log rather than failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Largest param_count for which the full command still fits in the
	 * u16 length field (guards the struct_size() computation below).
	 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the advertised entry count exactly */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	/* Drop parameters that only existed for disabled auto-connect */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
7302 
/* Handler for MGMT_OP_SET_EXTERNAL_CONFIG: toggle the "externally
 * configured" state of a controller that declares the EXTERNAL_CONFIG
 * quirk, possibly moving it between the configured and unconfigured
 * index lists.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* The UNCONFIGURED flag no longer matches the actual configuration
	 * state, so migrate the controller between the configured and
	 * unconfigured index lists.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			/* Now configured: run the power-on sequence so it
			 * gets announced as a regular controller.
			 */
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			/* Now unconfigured: expose it raw on the
			 * unconfigured index list.
			 */
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7358 
/* Handler for MGMT_OP_SET_PUBLIC_ADDRESS: set the public address a
 * driver-provided set_bdaddr callback will program during setup; may
 * complete the configuration of an unconfigured controller.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only meaningful when the driver can actually program an address */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* If this was the missing piece of configuration, migrate the
	 * controller to the configured index list and power it on.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7410 
/* Completion handler for Read Local OOB (Extended) Data: build the EIR
 * blob containing the SSP hash/randomizer values (C192/R192 and, with
 * Secure Connections, C256/R256), reply to the requester, and broadcast
 * an OOB Data Updated event to interested listeners.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	/* Derive the overall status: a transport error wins, otherwise the
	 * first byte of the controller's response carries the HCI status.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		/* NOTE(review): status already went through mgmt_status()
		 * above; this second mapping looks redundant — confirm.
		 */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		/* Legacy SSP: only the C192/R192 pair is available */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* class-of-dev (5) + two 16-byte values with EIR
			 * headers (18 each)
			 */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Secure Connections: C256/R256 always, C192/R192 only
		 * when not in SC-only mode.
		 */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* On failure send an empty EIR blob; h*/r* are unused in that case */
	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Requester opted into OOB data events by issuing this command */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
7530 
7531 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7532 				  struct mgmt_cp_read_local_oob_ext_data *cp)
7533 {
7534 	struct mgmt_pending_cmd *cmd;
7535 	int err;
7536 
7537 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7538 			       cp, sizeof(*cp));
7539 	if (!cmd)
7540 		return -ENOMEM;
7541 
7542 	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
7543 				 read_local_oob_ext_data_complete);
7544 
7545 	if (err < 0) {
7546 		mgmt_pending_remove(cmd);
7547 		return err;
7548 	}
7549 
7550 	return 0;
7551 }
7552 
/* MGMT_OP_READ_LOCAL_OOB_EXT_DATA handler.
 *
 * For BR/EDR with SSP enabled this defers to the controller via
 * read_local_ssp_oob_req() and replies asynchronously; for all other
 * supported address types the EIR payload is assembled here and the
 * reply sent synchronously, followed by a Local OOB Data Updated event
 * to other sockets.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* First pass: compute the worst-case EIR length for the requested
	 * address type so the reply buffer can be sized before taking the
	 * device lock.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				/* address + role + confirm + random + flags */
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: actually build the payload; eir_len now tracks the
	 * number of bytes written into rp->eir.
	 */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* Reply is sent from the completion handler. */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* addr[6] encodes the address type: 0x01 static/random,
		 * 0x00 public.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* 0x02 = peripheral preferred, 0x01 = central preferred */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
7713 
7714 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7715 {
7716 	u32 flags = 0;
7717 
7718 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
7719 	flags |= MGMT_ADV_FLAG_DISCOV;
7720 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7721 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7722 	flags |= MGMT_ADV_FLAG_APPEARANCE;
7723 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7724 	flags |= MGMT_ADV_PARAM_DURATION;
7725 	flags |= MGMT_ADV_PARAM_TIMEOUT;
7726 	flags |= MGMT_ADV_PARAM_INTERVALS;
7727 	flags |= MGMT_ADV_PARAM_TX_POWER;
7728 	flags |= MGMT_ADV_PARAM_SCAN_RSP;
7729 
7730 	/* In extended adv TX_POWER returned from Set Adv Param
7731 	 * will be always valid.
7732 	 */
7733 	if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7734 	    ext_adv_capable(hdev))
7735 		flags |= MGMT_ADV_FLAG_TX_POWER;
7736 
7737 	if (ext_adv_capable(hdev)) {
7738 		flags |= MGMT_ADV_FLAG_SEC_1M;
7739 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7740 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
7741 
7742 		if (hdev->le_features[1] & HCI_LE_PHY_2M)
7743 			flags |= MGMT_ADV_FLAG_SEC_2M;
7744 
7745 		if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7746 			flags |= MGMT_ADV_FLAG_SEC_CODED;
7747 	}
7748 
7749 	return flags;
7750 }
7751 
7752 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
7753 			     void *data, u16 data_len)
7754 {
7755 	struct mgmt_rp_read_adv_features *rp;
7756 	size_t rp_len;
7757 	int err;
7758 	struct adv_info *adv_instance;
7759 	u32 supported_flags;
7760 	u8 *instance;
7761 
7762 	bt_dev_dbg(hdev, "sock %p", sk);
7763 
7764 	if (!lmp_le_capable(hdev))
7765 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7766 				       MGMT_STATUS_REJECTED);
7767 
7768 	hci_dev_lock(hdev);
7769 
7770 	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
7771 	rp = kmalloc(rp_len, GFP_ATOMIC);
7772 	if (!rp) {
7773 		hci_dev_unlock(hdev);
7774 		return -ENOMEM;
7775 	}
7776 
7777 	supported_flags = get_supported_adv_flags(hdev);
7778 
7779 	rp->supported_flags = cpu_to_le32(supported_flags);
7780 	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
7781 	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
7782 	rp->max_instances = hdev->le_num_of_adv_sets;
7783 	rp->num_instances = hdev->adv_instance_cnt;
7784 
7785 	instance = rp->instance;
7786 	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
7787 		*instance = adv_instance->instance;
7788 		instance++;
7789 	}
7790 
7791 	hci_dev_unlock(hdev);
7792 
7793 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7794 				MGMT_STATUS_SUCCESS, rp, rp_len);
7795 
7796 	kfree(rp);
7797 
7798 	return err;
7799 }
7800 
7801 static u8 calculate_name_len(struct hci_dev *hdev)
7802 {
7803 	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7804 
7805 	return eir_append_local_name(hdev, buf, 0);
7806 }
7807 
7808 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7809 			   bool is_adv_data)
7810 {
7811 	u8 max_len = HCI_MAX_AD_LENGTH;
7812 
7813 	if (is_adv_data) {
7814 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7815 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
7816 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
7817 			max_len -= 3;
7818 
7819 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7820 			max_len -= 3;
7821 	} else {
7822 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7823 			max_len -= calculate_name_len(hdev);
7824 
7825 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
7826 			max_len -= 4;
7827 	}
7828 
7829 	return max_len;
7830 }
7831 
7832 static bool flags_managed(u32 adv_flags)
7833 {
7834 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7835 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
7836 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
7837 }
7838 
7839 static bool tx_power_managed(u32 adv_flags)
7840 {
7841 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
7842 }
7843 
7844 static bool name_managed(u32 adv_flags)
7845 {
7846 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
7847 }
7848 
7849 static bool appearance_managed(u32 adv_flags)
7850 {
7851 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
7852 }
7853 
7854 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
7855 			      u8 len, bool is_adv_data)
7856 {
7857 	int i, cur_len;
7858 	u8 max_len;
7859 
7860 	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
7861 
7862 	if (len > max_len)
7863 		return false;
7864 
7865 	/* Make sure that the data is correctly formatted. */
7866 	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
7867 		cur_len = data[i];
7868 
7869 		if (!cur_len)
7870 			continue;
7871 
7872 		if (data[i + 1] == EIR_FLAGS &&
7873 		    (!is_adv_data || flags_managed(adv_flags)))
7874 			return false;
7875 
7876 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
7877 			return false;
7878 
7879 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
7880 			return false;
7881 
7882 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
7883 			return false;
7884 
7885 		if (data[i + 1] == EIR_APPEARANCE &&
7886 		    appearance_managed(adv_flags))
7887 			return false;
7888 
7889 		/* If the current field length would exceed the total data
7890 		 * length, then it's invalid.
7891 		 */
7892 		if (i + cur_len >= len)
7893 			return false;
7894 	}
7895 
7896 	return true;
7897 }
7898 
7899 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
7900 {
7901 	u32 supported_flags, phy_flags;
7902 
7903 	/* The current implementation only supports a subset of the specified
7904 	 * flags. Also need to check mutual exclusiveness of sec flags.
7905 	 */
7906 	supported_flags = get_supported_adv_flags(hdev);
7907 	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
7908 	if (adv_flags & ~supported_flags ||
7909 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
7910 		return false;
7911 
7912 	return true;
7913 }
7914 
7915 static bool adv_busy(struct hci_dev *hdev)
7916 {
7917 	return (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
7918 		pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
7919 		pending_find(MGMT_OP_SET_LE, hdev) ||
7920 		pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev) ||
7921 		pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev));
7922 }
7923 
7924 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
7925 			     int err)
7926 {
7927 	struct adv_info *adv, *n;
7928 
7929 	bt_dev_dbg(hdev, "err %d", err);
7930 
7931 	hci_dev_lock(hdev);
7932 
7933 	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
7934 		u8 instance;
7935 
7936 		if (!adv->pending)
7937 			continue;
7938 
7939 		if (!err) {
7940 			adv->pending = false;
7941 			continue;
7942 		}
7943 
7944 		instance = adv->instance;
7945 
7946 		if (hdev->cur_adv_instance == instance)
7947 			cancel_adv_timeout(hdev);
7948 
7949 		hci_remove_adv_instance(hdev, instance);
7950 		mgmt_advertising_removed(sk, hdev, instance);
7951 	}
7952 
7953 	hci_dev_unlock(hdev);
7954 }
7955 
7956 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
7957 {
7958 	struct mgmt_pending_cmd *cmd = data;
7959 	struct mgmt_cp_add_advertising *cp = cmd->param;
7960 	struct mgmt_rp_add_advertising rp;
7961 
7962 	memset(&rp, 0, sizeof(rp));
7963 
7964 	rp.instance = cp->instance;
7965 
7966 	if (err)
7967 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
7968 				mgmt_status(err));
7969 	else
7970 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
7971 				  mgmt_status(err), &rp, sizeof(rp));
7972 
7973 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
7974 
7975 	mgmt_pending_free(cmd);
7976 }
7977 
7978 static int add_advertising_sync(struct hci_dev *hdev, void *data)
7979 {
7980 	struct mgmt_pending_cmd *cmd = data;
7981 	struct mgmt_cp_add_advertising *cp = cmd->param;
7982 
7983 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
7984 }
7985 
/* MGMT_OP_ADD_ADVERTISING handler: register (or replace) an advertising
 * instance carrying both parameters and data, then schedule it on the
 * controller when possible.  Replies synchronously unless HCI work had
 * to be queued, in which case add_advertising_complete() replies.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The command carries adv data followed by scan rsp data; the
	 * declared lengths must account for every byte received.
	 */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout can only be enforced while the device is powered. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Record which instance the sync work should actually schedule. */
	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8118 
/* Completion callback for MGMT_OP_ADD_EXT_ADV_PARAMS: report the
 * negotiated TX power and remaining data space on success, or tear the
 * instance down again on failure.
 */
static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance may already be gone (e.g. removed concurrently);
	 * in that case no reply is sent and only the command is freed.
	 */
	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	/* NOTE(review): cmd is dereferenced unconditionally above, so this
	 * NULL check looks unnecessary — confirm before removing.
	 */
	if (cmd)
		mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}
8169 
8170 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8171 {
8172 	struct mgmt_pending_cmd *cmd = data;
8173 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8174 
8175 	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8176 }
8177 
8178 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8179 			      void *data, u16 data_len)
8180 {
8181 	struct mgmt_cp_add_ext_adv_params *cp = data;
8182 	struct mgmt_rp_add_ext_adv_params rp;
8183 	struct mgmt_pending_cmd *cmd = NULL;
8184 	u32 flags, min_interval, max_interval;
8185 	u16 timeout, duration;
8186 	u8 status;
8187 	s8 tx_power;
8188 	int err;
8189 
8190 	BT_DBG("%s", hdev->name);
8191 
8192 	status = mgmt_le_support(hdev);
8193 	if (status)
8194 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8195 				       status);
8196 
8197 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8198 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8199 				       MGMT_STATUS_INVALID_PARAMS);
8200 
8201 	/* The purpose of breaking add_advertising into two separate MGMT calls
8202 	 * for params and data is to allow more parameters to be added to this
8203 	 * structure in the future. For this reason, we verify that we have the
8204 	 * bare minimum structure we know of when the interface was defined. Any
8205 	 * extra parameters we don't know about will be ignored in this request.
8206 	 */
8207 	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8208 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8209 				       MGMT_STATUS_INVALID_PARAMS);
8210 
8211 	flags = __le32_to_cpu(cp->flags);
8212 
8213 	if (!requested_adv_flags_are_valid(hdev, flags))
8214 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8215 				       MGMT_STATUS_INVALID_PARAMS);
8216 
8217 	hci_dev_lock(hdev);
8218 
8219 	/* In new interface, we require that we are powered to register */
8220 	if (!hdev_is_powered(hdev)) {
8221 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8222 				      MGMT_STATUS_REJECTED);
8223 		goto unlock;
8224 	}
8225 
8226 	if (adv_busy(hdev)) {
8227 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8228 				      MGMT_STATUS_BUSY);
8229 		goto unlock;
8230 	}
8231 
8232 	/* Parse defined parameters from request, use defaults otherwise */
8233 	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8234 		  __le16_to_cpu(cp->timeout) : 0;
8235 
8236 	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8237 		   __le16_to_cpu(cp->duration) :
8238 		   hdev->def_multi_adv_rotation_duration;
8239 
8240 	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8241 		       __le32_to_cpu(cp->min_interval) :
8242 		       hdev->le_adv_min_interval;
8243 
8244 	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8245 		       __le32_to_cpu(cp->max_interval) :
8246 		       hdev->le_adv_max_interval;
8247 
8248 	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8249 		   cp->tx_power :
8250 		   HCI_ADV_TX_POWER_NO_PREFERENCE;
8251 
8252 	/* Create advertising instance with no advertising or response data */
8253 	err = hci_add_adv_instance(hdev, cp->instance, flags,
8254 				   0, NULL, 0, NULL, timeout, duration,
8255 				   tx_power, min_interval, max_interval);
8256 
8257 	if (err < 0) {
8258 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8259 				      MGMT_STATUS_FAILED);
8260 		goto unlock;
8261 	}
8262 
8263 	/* Submit request for advertising params if ext adv available */
8264 	if (ext_adv_capable(hdev)) {
8265 		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
8266 				       data, data_len);
8267 		if (!cmd) {
8268 			err = -ENOMEM;
8269 			hci_remove_adv_instance(hdev, cp->instance);
8270 			goto unlock;
8271 		}
8272 
8273 		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8274 					 add_ext_adv_params_complete);
8275 		if (err < 0)
8276 			mgmt_pending_free(cmd);
8277 	} else {
8278 		rp.instance = cp->instance;
8279 		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8280 		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8281 		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8282 		err = mgmt_cmd_complete(sk, hdev->id,
8283 					MGMT_OP_ADD_EXT_ADV_PARAMS,
8284 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8285 	}
8286 
8287 unlock:
8288 	hci_dev_unlock(hdev);
8289 
8290 	return err;
8291 }
8292 
8293 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8294 {
8295 	struct mgmt_pending_cmd *cmd = data;
8296 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8297 	struct mgmt_rp_add_advertising rp;
8298 
8299 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8300 
8301 	memset(&rp, 0, sizeof(rp));
8302 
8303 	rp.instance = cp->instance;
8304 
8305 	if (err)
8306 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8307 				mgmt_status(err));
8308 	else
8309 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8310 				  mgmt_status(err), &rp, sizeof(rp));
8311 
8312 	mgmt_pending_free(cmd);
8313 }
8314 
8315 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8316 {
8317 	struct mgmt_pending_cmd *cmd = data;
8318 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8319 	int err;
8320 
8321 	if (ext_adv_capable(hdev)) {
8322 		err = hci_update_adv_data_sync(hdev, cp->instance);
8323 		if (err)
8324 			return err;
8325 
8326 		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8327 		if (err)
8328 			return err;
8329 
8330 		return hci_enable_ext_advertising_sync(hdev, cp->instance);
8331 	}
8332 
8333 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8334 }
8335 
/* MGMT_OP_ADD_EXT_ADV_DATA handler: second half of the two-call
 * registration.  Attaches advertising and scan response data to an
 * instance previously created by MGMT_OP_ADD_EXT_ADV_PARAMS and
 * schedules it.  On any error after the instance lookup the half-built
 * instance is removed again (clear_new_instance).
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The params call must have created the instance already. */
	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8454 
8455 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
8456 					int err)
8457 {
8458 	struct mgmt_pending_cmd *cmd = data;
8459 	struct mgmt_cp_remove_advertising *cp = cmd->param;
8460 	struct mgmt_rp_remove_advertising rp;
8461 
8462 	bt_dev_dbg(hdev, "err %d", err);
8463 
8464 	memset(&rp, 0, sizeof(rp));
8465 	rp.instance = cp->instance;
8466 
8467 	if (err)
8468 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8469 				mgmt_status(err));
8470 	else
8471 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8472 				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8473 
8474 	mgmt_pending_free(cmd);
8475 }
8476 
8477 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
8478 {
8479 	struct mgmt_pending_cmd *cmd = data;
8480 	struct mgmt_cp_remove_advertising *cp = cmd->param;
8481 	int err;
8482 
8483 	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
8484 	if (err)
8485 		return err;
8486 
8487 	if (list_empty(&hdev->adv_instances))
8488 		err = hci_disable_advertising_sync(hdev);
8489 
8490 	return err;
8491 }
8492 
/* Handler for MGMT_OP_REMOVE_ADVERTISING: validates the request under
 * hdev lock and queues the actual removal on the cmd_sync machinery.
 * An instance value of 0 requests removal of all instances.
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* A non-zero instance must refer to an existing advertising set */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Refuse while another operation touching advertising state is
	 * still pending.
	 */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to remove when no instances are registered at all */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* On success the reply is sent by remove_advertising_complete() */
	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8542 
8543 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
8544 			     void *data, u16 data_len)
8545 {
8546 	struct mgmt_cp_get_adv_size_info *cp = data;
8547 	struct mgmt_rp_get_adv_size_info rp;
8548 	u32 flags, supported_flags;
8549 	int err;
8550 
8551 	bt_dev_dbg(hdev, "sock %p", sk);
8552 
8553 	if (!lmp_le_capable(hdev))
8554 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8555 				       MGMT_STATUS_REJECTED);
8556 
8557 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8558 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8559 				       MGMT_STATUS_INVALID_PARAMS);
8560 
8561 	flags = __le32_to_cpu(cp->flags);
8562 
8563 	/* The current implementation only supports a subset of the specified
8564 	 * flags.
8565 	 */
8566 	supported_flags = get_supported_adv_flags(hdev);
8567 	if (flags & ~supported_flags)
8568 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8569 				       MGMT_STATUS_INVALID_PARAMS);
8570 
8571 	rp.instance = cp->instance;
8572 	rp.flags = cp->flags;
8573 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8574 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8575 
8576 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8577 				MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8578 
8579 	return err;
8580 }
8581 
/* Dispatch table for mgmt commands, indexed by opcode. Each entry gives
 * the handler, the (minimum) parameter size, and flags: HCI_MGMT_VAR_LEN
 * allows larger payloads, HCI_MGMT_NO_HDEV means no controller index is
 * needed, HCI_MGMT_UNTRUSTED permits unprivileged sockets, and
 * HCI_MGMT_UNCONFIGURED allows use on unconfigured controllers.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
};
8709 
/* Announce a newly registered controller index to mgmt listeners. Sends
 * the legacy (UNCONF_)INDEX_ADDED event plus the extended index event
 * that also carries controller type and bus.
 */
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	/* Raw-only devices are not exposed through the mgmt interface */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;	/* unconfigured controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;	/* configured controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;	/* AMP controller */
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
8741 
/* Announce removal of a controller index. Any commands still pending on
 * the index are failed with INVALID_INDEX before the removal events are
 * sent (legacy plus extended variant).
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	/* Raw-only devices are not exposed through the mgmt interface */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* Fail all pending commands (opcode 0 matches any) */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;	/* unconfigured controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;	/* configured controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;	/* AMP controller */
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
8776 
/* Called when power-on of the controller has finished (err carries the
 * outcome). Restores LE auto-connect actions and passive scanning on
 * success, then answers any pending SET_POWERED commands and emits a
 * New Settings event.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	/* Answer pending SET_POWERED commands; match.sk remembers the
	 * requesting socket so it can be skipped by new_settings().
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
8799 
/* Tear-down half of powering the controller off: answer pending
 * commands, report a zero class of device if needed, and emit a New
 * Settings event.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	/* Fail all remaining pending commands (opcode 0 matches any) */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Report the class of device as all-zero while powered off */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
8833 
8834 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8835 {
8836 	struct mgmt_pending_cmd *cmd;
8837 	u8 status;
8838 
8839 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8840 	if (!cmd)
8841 		return;
8842 
8843 	if (err == -ERFKILL)
8844 		status = MGMT_STATUS_RFKILLED;
8845 	else
8846 		status = MGMT_STATUS_FAILED;
8847 
8848 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8849 
8850 	mgmt_pending_remove(cmd);
8851 }
8852 
/* Emit a New Link Key event for a BR/EDR link key. store_hint tells
 * user space whether the key should be stored persistently.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
8869 
8870 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8871 {
8872 	switch (ltk->type) {
8873 	case SMP_LTK:
8874 	case SMP_LTK_RESPONDER:
8875 		if (ltk->authenticated)
8876 			return MGMT_LTK_AUTHENTICATED;
8877 		return MGMT_LTK_UNAUTHENTICATED;
8878 	case SMP_LTK_P256:
8879 		if (ltk->authenticated)
8880 			return MGMT_LTK_P256_AUTH;
8881 		return MGMT_LTK_P256_UNAUTH;
8882 	case SMP_LTK_P256_DEBUG:
8883 		return MGMT_LTK_P256_DEBUG;
8884 	}
8885 
8886 	return MGMT_LTK_UNAUTHENTICATED;
8887 }
8888 
/* Emit a New Long Term Key event for an LE LTK, with a store hint that
 * suppresses storage for non-identity random addresses.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK marks the key generated while acting as initiator */
	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
8931 
/* Emit a New Identity Resolving Key event, carrying both the current
 * resolvable private address and the identity address plus IRK value.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
8947 
/* Emit a New Signature Resolving Key event, with a store hint that
 * suppresses storage for non-identity random addresses.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
8977 
/* Emit a New Connection Parameter event so user space can persist the
 * parameters. Only identity addresses are reported, since parameters
 * tied to a changing random address would be useless to store.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
8998 
/* Emit a Device Connected event with EIR data describing the remote:
 * either the LE advertising data verbatim, or (for BR/EDR) the remote
 * name and class of device.
 *
 * NOTE(review): the event is assembled in a fixed 512-byte stack buffer;
 * eir_len is bounded by le_adv_data_len or the name/class fields, which
 * presumably always fits — confirm against HCI_MAX_AD_LENGTH and
 * HCI_MAX_NAME_LENGTH limits.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;
	u32 flags = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	/* Flag connections we initiated ourselves */
	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only include the class of device if it is non-zero */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}
9039 
9040 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9041 {
9042 	struct sock **sk = data;
9043 
9044 	cmd->cmd_complete(cmd, 0);
9045 
9046 	*sk = cmd->sk;
9047 	sock_hold(*sk);
9048 
9049 	mgmt_pending_remove(cmd);
9050 }
9051 
/* mgmt_pending_foreach callback: notify listeners that the device was
 * unpaired, then complete and remove the pending UNPAIR_DEVICE command.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
9062 
9063 bool mgmt_powering_down(struct hci_dev *hdev)
9064 {
9065 	struct mgmt_pending_cmd *cmd;
9066 	struct mgmt_mode *cp;
9067 
9068 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9069 	if (!cmd)
9070 		return false;
9071 
9072 	cp = cmd->param;
9073 	if (!cp->val)
9074 		return true;
9075 
9076 	return false;
9077 }
9078 
/* Emit a Device Disconnected event and resolve any pending DISCONNECT
 * and UNPAIR_DEVICE commands. Also expedites a pending power-off once
 * the last connection is gone.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* No event if the connection was never reported as connected */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Complete any pending DISCONNECT; sk gets a referenced copy of
	 * the requesting socket so it is skipped when sending the event.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
9118 
9119 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9120 			    u8 link_type, u8 addr_type, u8 status)
9121 {
9122 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9123 	struct mgmt_cp_disconnect *cp;
9124 	struct mgmt_pending_cmd *cmd;
9125 
9126 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9127 			     hdev);
9128 
9129 	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9130 	if (!cmd)
9131 		return;
9132 
9133 	cp = cmd->param;
9134 
9135 	if (bacmp(bdaddr, &cp->addr.bdaddr))
9136 		return;
9137 
9138 	if (cp->addr.type != bdaddr_type)
9139 		return;
9140 
9141 	cmd->cmd_complete(cmd, mgmt_status(status));
9142 	mgmt_pending_remove(cmd);
9143 }
9144 
/* Emit a Connect Failed event. Also expedites a pending power-off once
 * the failed connection was the last one in the hash.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
9164 
/* Emit a PIN Code Request event for a BR/EDR pairing. The secure flag
 * indicates that a 16-digit PIN is required.
 */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
9175 
9176 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9177 				  u8 status)
9178 {
9179 	struct mgmt_pending_cmd *cmd;
9180 
9181 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9182 	if (!cmd)
9183 		return;
9184 
9185 	cmd->cmd_complete(cmd, mgmt_status(status));
9186 	mgmt_pending_remove(cmd);
9187 }
9188 
9189 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9190 				      u8 status)
9191 {
9192 	struct mgmt_pending_cmd *cmd;
9193 
9194 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9195 	if (!cmd)
9196 		return;
9197 
9198 	cmd->cmd_complete(cmd, mgmt_status(status));
9199 	mgmt_pending_remove(cmd);
9200 }
9201 
/* Emit a User Confirmation Request event. value is the 6-digit numeric
 * comparison value; confirm_hint tells user space whether to just ask
 * for confirmation (no value display needed).
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
9218 
/* Emit a User Passkey Request event asking user space for passkey
 * entry during pairing.
 */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
9232 
9233 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9234 				      u8 link_type, u8 addr_type, u8 status,
9235 				      u8 opcode)
9236 {
9237 	struct mgmt_pending_cmd *cmd;
9238 
9239 	cmd = pending_find(opcode, hdev);
9240 	if (!cmd)
9241 		return -ENOENT;
9242 
9243 	cmd->cmd_complete(cmd, mgmt_status(status));
9244 	mgmt_pending_remove(cmd);
9245 
9246 	return 0;
9247 }
9248 
/* Resolve a pending USER_CONFIRM_REPLY command with the given status. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
9255 
/* Resolve a pending USER_CONFIRM_NEG_REPLY command with the status. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
9263 
/* Resolve a pending USER_PASSKEY_REPLY command with the given status. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
9270 
/* Resolve a pending USER_PASSKEY_NEG_REPLY command with the status. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
9278 
/* Emit a Passkey Notify event so user space can display the passkey.
 * entered counts the digits the remote side has keyed in so far.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
9294 
/* Emit an Authentication Failed event and, if a pairing command is
 * pending for this connection, complete it with the mapped status.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* Skip the requester's socket; it gets the command response */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
9315 
/* Called when the HCI Write Authentication Enable command completed.
 * Syncs the HCI_LINK_SECURITY flag with the controller state, answers
 * pending SET_LINK_SECURITY commands and emits New Settings on change.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* changed reflects whether the flag actually flipped */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9342 
9343 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9344 {
9345 	struct cmd_lookup *match = data;
9346 
9347 	if (match->sk == NULL) {
9348 		match->sk = cmd->sk;
9349 		sock_hold(match->sk);
9350 	}
9351 }
9352 
/* Called when writing the class of device finished. Finds the socket
 * of any pending class-affecting command and, on success, broadcasts
 * the Class Of Device Changed event to everyone else.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
9371 
/* Called when writing the local name finished. Broadcasts Local Name
 * Changed unless the write was part of the power-on sequence.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No user-initiated request: store the name ourselves */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the requester's socket; it gets the command response */
	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
9399 
9400 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9401 {
9402 	int i;
9403 
9404 	for (i = 0; i < uuid_count; i++) {
9405 		if (!memcmp(uuid, uuids[i], 16))
9406 			return true;
9407 	}
9408 
9409 	return false;
9410 }
9411 
/* Walk the EIR/AD structures in eir and return true if any advertised
 * 16-, 32- or 128-bit service UUID matches an entry in uuids. Shorter
 * UUIDs are expanded into the Bluetooth base UUID before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];	/* length byte excludes itself */
		u8 uuid[16];
		int i;

		/* Zero length terminates the EIR data */
		if (field_len == 0)
			break;

		/* Stop on a field that would run past the buffer */
		if (eir_len - parsed < field_len + 1)
			break;

		/* eir[1] is the AD type; UUID bytes follow from eir[2],
		 * little-endian on the wire.
		 */
		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
9466 
9467 static void restart_le_scan(struct hci_dev *hdev)
9468 {
9469 	/* If controller is not scanning we are done. */
9470 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
9471 		return;
9472 
9473 	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
9474 		       hdev->discovery.scan_start +
9475 		       hdev->discovery.scan_duration))
9476 		return;
9477 
9478 	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
9479 			   DISCOV_LE_RESTART_DELAY);
9480 }
9481 
9482 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
9483 			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9484 {
9485 	/* If a RSSI threshold has been specified, and
9486 	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
9487 	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
9488 	 * is set, let it through for further processing, as we might need to
9489 	 * restart the scan.
9490 	 *
9491 	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
9492 	 * the results are also dropped.
9493 	 */
9494 	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9495 	    (rssi == HCI_RSSI_INVALID ||
9496 	    (rssi < hdev->discovery.rssi &&
9497 	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
9498 		return  false;
9499 
9500 	if (hdev->discovery.uuid_count != 0) {
9501 		/* If a list of UUIDs is provided in filter, results with no
9502 		 * matching UUID should be dropped.
9503 		 */
9504 		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
9505 				   hdev->discovery.uuids) &&
9506 		    !eir_has_uuids(scan_rsp, scan_rsp_len,
9507 				   hdev->discovery.uuid_count,
9508 				   hdev->discovery.uuids))
9509 			return false;
9510 	}
9511 
9512 	/* If duplicate filtering does not report RSSI changes, then restart
9513 	 * scanning to ensure updated result with updated RSSI values.
9514 	 */
9515 	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
9516 		restart_le_scan(hdev);
9517 
9518 		/* Validate RSSI value against the RSSI threshold once more. */
9519 		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9520 		    rssi < hdev->discovery.rssi)
9521 			return false;
9522 	}
9523 
9524 	return true;
9525 }
9526 
/* Emit a MGMT_EV_DEVICE_FOUND event for a discovered remote device.
 * The event's EIR payload is assembled from @eir (advertising/inquiry
 * data), an optional Class of Device field derived from @dev_class, and
 * @scan_rsp (scan response data), in that order. Results may be dropped
 * by the active discovery filter or the limited-discovery check.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK &&
		    list_empty(&hdev->pend_le_reports) &&
		    !hci_is_adv_monitoring(hdev)) {
			return;
		}
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Append a synthesized Class of Device field only when the EIR
	 * data doesn't already carry one (BR/EDR results provide the CoD
	 * out of band).
	 */
	if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
				       NULL))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
9611 
9612 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9613 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
9614 {
9615 	struct mgmt_ev_device_found *ev;
9616 	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
9617 	u16 eir_len;
9618 
9619 	ev = (struct mgmt_ev_device_found *) buf;
9620 
9621 	memset(buf, 0, sizeof(buf));
9622 
9623 	bacpy(&ev->addr.bdaddr, bdaddr);
9624 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
9625 	ev->rssi = rssi;
9626 
9627 	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
9628 				  name_len);
9629 
9630 	ev->eir_len = cpu_to_le16(eir_len);
9631 
9632 	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
9633 }
9634 
9635 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
9636 {
9637 	struct mgmt_ev_discovering ev;
9638 
9639 	bt_dev_dbg(hdev, "discovering %u", discovering);
9640 
9641 	memset(&ev, 0, sizeof(ev));
9642 	ev.type = hdev->discovery.type;
9643 	ev.discovering = discovering;
9644 
9645 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
9646 }
9647 
9648 void mgmt_suspending(struct hci_dev *hdev, u8 state)
9649 {
9650 	struct mgmt_ev_controller_suspend ev;
9651 
9652 	ev.suspend_state = state;
9653 	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
9654 }
9655 
9656 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
9657 		   u8 addr_type)
9658 {
9659 	struct mgmt_ev_controller_resume ev;
9660 
9661 	ev.wake_reason = reason;
9662 	if (bdaddr) {
9663 		bacpy(&ev.addr.bdaddr, bdaddr);
9664 		ev.addr.type = addr_type;
9665 	} else {
9666 		memset(&ev.addr, 0, sizeof(ev.addr));
9667 	}
9668 
9669 	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
9670 }
9671 
/* Management control channel descriptor: routes HCI_CHANNEL_CONTROL
 * commands to the mgmt_handlers table and sets up per-hdev management
 * state via mgmt_init_hdev.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
9678 
/* Register the management control channel with the HCI core.
 * Returns 0 on success or a negative error from the registration.
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
9683 
/* Unregister the management control channel from the HCI core. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
9688